//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
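//
// For intuition, a minimal sketch (not from the papers above) of the core
// transformation, assuming VF=4 and ignoring remainder handling:
//
//   for (i = 0; i < n; i++)          // scalar loop: one element per step
//     a[i] = b[i] + 1;
//
// becomes, conceptually,
//
//   for (i = 0; i < n; i += 4)       // wide loop: four elements per step
//     a[i:i+3] = b[i:i+3] + <1,1,1,1>;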
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; the enum below lists the
// options. I.e., the vectorizer will try to fold the tail loop (epilogue)
// into the vector body and predicate the instructions accordingly.
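// For illustration (a sketch, not tied to any particular target): with a
// trip count of 10 and VF=4, tail folding executes three masked vector
// iterations instead of two unmasked ones plus a scalar remainder:
//
//   iteration 0: mask = <1,1,1,1>   elements 0..3
//   iteration 1: mask = <1,1,1,1>   elements 4..7
//   iteration 2: mask = <1,1,0,0>   elements 8..9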
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after-loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of a loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, ElementCount VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF.isVector()) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return TypeSize::get(VF.getKnownMinValue() *
                             DL.getTypeAllocSize(Ty).getFixedValue(),
                         VF.isScalable()) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
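
// For example (illustrative; the exact sizes are DataLayout-dependent): on
// x86-64, x86_fp80 has a type size of 80 bits but an alloc size of 128 bits,
// so an in-memory array of x86_fp80 contains padding and is not
// bitcast-compatible with a <VF x x86_fp80> vector; the check above reports
// such a type as irregular.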

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns the "best known" trip count for the specified loop \p L, as defined
/// by the following procedure:
///   1) Returns the exact trip count if it is known.
///   2) Returns the expected trip count according to profile data, if any.
///   3) Returns an upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM),
        BFI(BFI), PSI(PSI) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, RecurrenceDescriptor *RdxDesc,
                           Value *StartV, unsigned UF, ElementCount VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead of
  /// \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);
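
  // For illustration (a sketch, not the exact IR emitted): widening an
  // integer induction variable i with step 1 at VF=4 produces a vector
  // induction starting at <i, i+1, i+2, i+3> that is advanced by the splat
  // <4, 4, 4, 4> on every wide iteration.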

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate
  /// a vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  void setVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
    VectorLoopValueMap.setVectorValue(Scalar, Part, Vector);
  }

  void resetVectorValue(Value *Scalar, unsigned Part, Value *Vector) {
    VectorLoopValueMap.resetVectorValue(Scalar, Part, Vector);
  }

  void setScalarValue(Value *Scalar, const VPIteration &Instance, Value *V) {
    VectorLoopValueMap.setScalarValue(Scalar, Instance, V);
  }

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
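
  // For example (illustrative): with UF=2 and VF=4, a vectorized value is
  // represented by two 4-lane vectors (one per part), while a scalarized
  // value is represented by 2 x 4 = 8 scalar values, addressed by
  // (part, lane) pairs.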

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);
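
  // For example (illustrative): with VF=4, StartIdx=0 and step %s,
  // getStepVector adds <0*%s, 1*%s, 2*%s, 3*%s> to the lanes of Val,
  // yielding the per-lane values of one wide induction step.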

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - the original one (if \p EntryVal is a phi corresponding to
  /// the original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()).
  /// In the latter case \p EntryVal is a TruncInst and we must not record
  /// anything for that IV, but it's error-prone to expect callers of this
  /// routine to care about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---
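
  // For orientation (a simplified sketch; some checks may be absent): the
  // generated skeleton is
  //
  //   iteration-count check -> SCEV checks -> memory checks
  //        -> vector preheader -> vector body -> middle block
  //
  // where each check may bypass to the scalar preheader, and the middle block
  // either continues to the scalar (epilogue) loop for any remaining
  // iterations or jumps straight to the exit block.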

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The (unique) ExitBlock of the scalar loop. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile-guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile-guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};
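
// For intuition (an illustrative sketch): with a trip count of 30, a main
// loop at VF=8 and an epilogue at VF=4, epilogue vectorization runs 3 main
// vector iterations (24 elements), 1 epilogue vector iteration (4 elements),
// and leaves the last 2 iterations to the scalar remainder loop.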

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                                 LoopInfo *LI, DominatorTree *DT,
                                 const TargetLibraryInfo *TLI,
                                 const TargetTransformInfo *TTI,
                                 AssumptionCache *AC,
                                 OptimizationRemarkEmitter *ORE,
                                 EpilogueLoopVectorizationInfo &EPI,
                                 LoopVectorizationLegality *LVL,
                                 llvm::LoopVectorizationCostModel *CM,
                                 BlockFrequencyInfo *BFI,
                                 ProfileSummaryInfo *PSI)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Value *Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // Prefer the instruction's debug location, if it has one; otherwise keep
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}
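
// For example (illustrative): with Step=2 and a fixed VF of 4, this returns
// the constant 8; with a scalable VF of <vscale x 4>, it returns 8 * vscale,
// materialized via a vscale computation.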

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag,
                             TheLoop, I)
            << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints for how the scalar epilogue loop should
// be lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};
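
// For intuition (an illustrative sketch, not the exact formula used by the
// cost model below): if one scalar iteration costs 8 and the VF=4 vector
// body costs 20, the per-element cost drops from 8 to 5, so VF=4 would be
// preferred over scalar execution, all else being equal.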

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<ElementCount> computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(ElementCount MaxVF);
  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Set up cost-based decisions for the user vectorization factor.
  void selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If the interleave count has been specified by metadata it will be
  /// returned. Otherwise, the interleave count is computed and returned. VF
  /// and LoopCost are the selected vectorization factor and the cost of the
  /// selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is the ClassID of the target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is the ClassID of the target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Information about the register usage of the loop for the given
  /// vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside of it. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
    assert(VF.isVector() &&
           "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
    if (VF.isScalar())
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
    return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for a memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };
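
  // For intuition (an informal sketch, not normative): a load of a[i] with
  // stride +1 is typically CM_Widen (one wide load per part); a[N-i] is
  // CM_Widen_Reverse (a wide load plus a reverse shuffle); members of a group
  // such as a[2*i] and a[2*i+1] may become CM_Interleave; and a[b[i]] needs
  // CM_GatherScatter or CM_Scalarize, depending on target support and cost.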

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >= 2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
                           ElementCount VF, InstWidening W,
                           InstructionCost Cost) {
    assert(VF.isVector() && "Expected VF >= 2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF to be a vector VF");
    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
    assert(VF.isVector() && "Expected VF >= 2");
    std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return true if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }
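
  // For example (illustrative IR): given a primary induction `%iv` of type
  // i64 and
  //   %t = trunc i64 %iv to i32
  // the truncate is optimizable when it is not free: rather than widening %iv
  // to <VF x i64> and truncating every element, we can introduce a new i32
  // induction variable with the same start and step.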

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(ElementCount VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on the CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(ElementCount VF) {
    // Do the analysis once.
    if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports a masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports a masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    Align Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I,
                               ElementCount VF = ElementCount::getFixed(1));

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }
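
  // For example (an informal sketch): in
  //   for (i = 0; i < n; ++i)
  //     if (c[i]) a[i] = b[i] / d[i];
  // the store to a[i] is conditional and the division may trap for lanes
  // where c[i] is false, so both must either be masked or be scalarized and
  // emitted behind a per-lane branch.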

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool
  memoryInstructionCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool
  interleavedAccessCanBeWidened(Instruction *I,
                                ElementCount VF = ElementCount::getFixed(1));

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if we're required to use a scalar epilogue for at least
  /// the final iteration of the original loop.
  bool requiresScalarEpilogue() const {
    if (!isScalarEpilogueAllowed())
      return false;
    // If we might exit from anywhere but the latch, we must run the exiting
    // iteration in scalar form.
    if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
      return true;
    return InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed, i.e. not prohibited by
  /// optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
  /// nodes to the chain of instructions representing the reductions. Uses a
  /// MapVector to ensure deterministic iteration order.
  using ReductionChainMap =
      SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;

  /// Return the chain of instructions representing an inloop reduction.
  const ReductionChainMap &getInLoopReductionChains() const {
    return InLoopReductionChains;
  }

  /// Returns true if the Phi is part of an inloop reduction.
  bool isInLoopReduction(PHINode *Phi) const {
    return InLoopReductionChains.count(Phi);
  }

  /// Estimate the cost of an intrinsic call instruction \p CI if it were
  /// vectorized with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF);

  /// Estimate the cost of a call instruction \p CI if it were vectorized with
  /// factor VF. Return the cost of the instruction, including scalarization
  /// overhead if it's needed. The flag NeedToScalarize shows if the call needs
  /// to be scalarized: i.e., either a vector version isn't available, or it is
  /// too expensive.
  InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
                                    bool &NeedToScalarize);
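
  // For example (hypothetical scenario, for illustration only): a call to
  // sinf inside the loop may be widened to a vector math library routine or a
  // vector intrinsic if one is available for the chosen VF; when neither
  // exists, NeedToScalarize is set and the returned cost reflects VF scalar
  // calls plus the extract/insert overhead.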

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, a power-of-2 larger
  /// than zero. One is returned if vectorization should best be avoided due
  /// to cost.
  ElementCount computeFeasibleMaxVF(unsigned ConstTripCount,
                                    ElementCount UserVF);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<InstructionCost, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(ElementCount VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
                                     Type *&VectorTy);

  /// Return the cost of instructions in an inloop reduction pattern, if \p I
  /// is part of that pattern.
  InstructionCost getReductionPatternCost(Instruction *I, ElementCount VF,
                                          Type *VectorTy,
                                          TTI::TargetCostKind CostKind);

  /// Calculate the vectorization cost of memory instruction \p I.
  InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);

  /// The cost computation for a scalarized memory instruction.
  InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);

  /// The cost computation for an interleaving group of memory instructions.
  InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);

  /// The cost computation for a Gather/Scatter instruction.
  InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);

  /// The cost calculation for Load/Store instruction \p I with uniform
  /// pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  InstructionCost getScalarizationOverhead(Instruction *I, ElementCount VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// PHINodes of the reductions that should be expanded in-loop, along with
  /// their associated chains of reduction operations, in program order from
  /// top (PHI) to bottom.
  ReductionChainMap InLoopReductionChains;

  /// A Map of inloop reduction operations and their immediate chain operand.
  /// FIXME: This can be removed once reductions can be costed correctly in
  /// vplan. This was added to allow quick lookup of the inloop operations,
  /// without having to loop through InLoopReductionChains.
  DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
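
  // For example (an informal sketch): for the reduction
  //   for (i = 0; i < n; ++i) s += a[i];
  // the chain recorded for the phi of `s` contains the add. Expanding it
  // in-loop means emitting a reduction of each loaded vector (e.g. a
  // vector.reduce.add) inside the loop body, instead of keeping a <VF x i32>
  // partial-sum phi that is reduced once after the loop.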

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              ElementCount VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(ElementCount VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(ElementCount VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
                                std::pair<InstWidening, InstructionCost>>;

  DecisionList WideningDecisions;

  /// Returns true if \p V is expected to be vectorized and it needs to be
  /// extracted.
  bool needsExtract(Value *V, ElementCount VF) const {
    Instruction *I = dyn_cast<Instruction>(V);
    if (VF.isScalar() || !I || !TheLoop->contains(I) ||
        TheLoop->isLoopInvariant(I))
      return false;

    // Assume we can vectorize V (and hence we need extraction) if the
    // scalars are not computed yet. This can happen, because it is called
    // via getScalarizationOverhead from setCostBasedWideningDecision, before
    // the scalars are collected. That should be a safe assumption in most
    // cases, because we check if the operands have vectorizable types
    // beforehand in LoopVectorizationLegality.
    return Scalars.find(VF) == Scalars.end() ||
           !isScalarAfterVectorization(I, VF);
  }

  /// Returns a range containing only operands needing to be extracted.
  SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
                                                   ElementCount VF) {
    return SmallVector<Value *, 4>(make_filter_range(
        Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
  }

  /// Determines if we have the infrastructure to vectorize loop \p L and its
  /// epilogue, assuming the main loop is vectorized by \p VF.
  bool isCandidateForEpilogueVectorization(const Loop &L,
                                           const ElementCount VF) const;

  /// Returns true if epilogue vectorization is considered profitable, and
  /// false otherwise.
  /// \p VF is the vectorization factor chosen for the original loop.
  bool isEpilogueVectorizationProfitable(const ElementCount VF) const;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved
  /// accesses with the same stride that are close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;

  /// Profitable vector factors.
  SmallVector<VectorizationFactor, 8> ProfitableVFs;
};

} // end namespace llvm

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the
// vector length information is not provided, vectorization is not considered
// explicit. Interleave hints are not allowed either. These limitations will be
// relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legality checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->isInnermost() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
                         "outer loops.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  return true;
}

static void collectSupportedLoops(Loop &L, LoopInfo *LI,
                                  OptimizationRemarkEmitter *ORE,
                                  SmallVectorImpl<Loop *> &V) {
  // Collect inner loops and outer loops without irreducible control flow. For
  // now, only collect outer loops that have explicit vectorization hints. If
  // we are stress testing the VPlan H-CFG construction, we collect the
  // outermost loop of every loop nest.
  if (L.isInnermost() || VPlanBuildStressTest ||
      (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
    LoopBlocksRPO RPOT(&L);
    RPOT.perform(LI);
    if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
      V.push_back(&L);
      // TODO: Collect inner loops inside marked outer loops in case
      // vectorization fails for the outer loop. Do not invoke
      // 'containsIrreducibleCFG' again for inner loops when the outer loop is
      // already known to be reducible. We can use an inherited attribute for
      // that.
      return;
    }
  }
  for (Loop *InnerL : L)
    collectSupportedLoops(*InnerL, LI, ORE, V);
}

namespace {

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  LoopVectorizePass Impl;

  explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
                         bool VectorizeOnlyWhenForced = false)
      : FunctionPass(ID),
        Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE, PSI).MadeAnyChange;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();

    // We currently do not preserve loopinfo/dominator analyses with outer loop
    // vectorization. Until this is addressed, mark these analyses as preserved
    // only for the non-VPlan-native path.
    // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
    if (!EnableVPlanNativePath) {
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
    }

    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
// LoopVectorizationCostModel and LoopVectorizationPlanner.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop,
  // but only if it's proven safe to do so. Otherwise, the broadcast will be
  // placed inside the vector loop body.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
                     (!Instr ||
                      DT->dominates(Instr->getParent(), LoopVectorPreHeader));
  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (SafeToHoist)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
    const InductionDescriptor &II, Value *Step, Value *Start,
    Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
    VPTransformState &State) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    assert(Start->getType()->isIntegerTy() &&
           "Truncation requires an integer type");
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = Builder.CreateTrunc(Step, TruncType);
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart =
      getStepVector(SplatStart, 0, Step, II.getInductionOpcode());

  // We create vector phi nodes for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (Step->getType()->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = II.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Multiply the vectorization factor by the step using integer or
  // floating-point arithmetic as appropriate.
  Value *ConstVF =
      getSignedIntOrFpConstant(Step->getType(), VF.getKnownMinValue());
  Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));

  // Create a vector splat to use in the induction update.
  //
  // FIXME: If the step is non-constant, we create the vector splat with
  //        IRBuilder. IRBuilder can constant-fold the multiply, but it
  //        doesn't handle a constant vector splat.
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  Value *SplatVF = isa<Constant>(Mul)
                       ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
                       : Builder.CreateVectorSplat(VF, Mul);
  Builder.restoreIP(CurrIP);

  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  VecInd->setDebugLoc(EntryVal->getDebugLoc());
  Instruction *LastInduction = VecInd;
  for (unsigned Part = 0; Part < UF; ++Part) {
    State.set(Def, EntryVal, LastInduction, Part);

    if (isa<TruncInst>(EntryVal))
      addMetadata(LastInduction, EntryVal);
    recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
                                          State, Part);

    LastInduction = cast<Instruction>(addFastMathFlag(
        Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
    LastInduction->setDebugLoc(EntryVal->getDebugLoc());
  }

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Cost->isScalarAfterVectorization(I, VF) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return llvm::any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
    const InductionDescriptor &ID, const Instruction *EntryVal,
    Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
    unsigned Part, unsigned Lane) {
  assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
         "Expected either an induction phi-node or a truncate of it!");

  // This induction variable is not the phi from the original loop but the
  // newly-created IV, based on the proof that the casted Phi is equal to the
  // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
  // re-uses the same InductionDescriptor that the original IV uses, but we
  // don't have to do any recording in this case - that is done when the
  // original IV is processed.
  if (isa<TruncInst>(EntryVal))
    return;

  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (Casts.empty())
    return;
  // Only the first Cast instruction in the Casts vector is of interest.
  // The rest of the Casts (if they exist) have no uses outside the
  // induction update chain itself.
  if (Lane < UINT_MAX)
    State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
  else
    State.set(CastDef, VectorLoopVal, Part);
}

void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
                                                TruncInst *Trunc, VPValue *Def,
                                                VPValue *CastDef,
                                                VPTransformState &State) {
  assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
         "Primary induction variable must have an integer type");

  auto II = Legal->getInductionVars().find(IV);
  assert(II != Legal->getInductionVars().end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The value from the original loop to which we are mapping the new induction
  // variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // Generate code for the induction step. Note that induction steps are
  // required to be loop-invariant.
  auto CreateStepValue = [&](const SCEV *Step) -> Value * {
    assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
           "Induction step should be loop invariant");
    if (PSE.getSE()->isSCEVable(IV->getType())) {
      SCEVExpander Exp(*PSE.getSE(), DL, "induction");
      return Exp.expandCodeFor(Step, Step->getType(),
                               LoopVectorPreHeader->getTerminator());
    }
    return cast<SCEVUnknown>(Step)->getValue();
  };

  // The scalar value to broadcast. This is derived from the canonical
  // induction variable. If a truncation type is given, truncate the canonical
  // induction variable and step. Otherwise, derive these values from the
  // induction descriptor.
  auto CreateScalarIV = [&](Value *&Step) -> Value * {
    Value *ScalarIV = Induction;
    if (IV != OldInduction) {
      ScalarIV = IV->getType()->isIntegerTy()
                     ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
                     : Builder.CreateCast(Instruction::SIToFP, Induction,
                                          IV->getType());
      ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
      ScalarIV->setName("offset.idx");
    }
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step->getType()->isIntegerTy() &&
             "Truncation requires an integer step");
      ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
      Step = Builder.CreateTrunc(Step, TruncType);
    }
    return ScalarIV;
  };

  // Create the vector values from the scalar IV, in the absence of creating a
  // vector IV.
  auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    for (unsigned Part = 0; Part < UF; ++Part) {
      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      Value *EntryPart =
          getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step,
                        ID.getInductionOpcode());
      State.set(Def, EntryVal, EntryPart, Part);
      if (Trunc)
        addMetadata(EntryPart, Trunc);
      recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
                                            State, Part);
    }
  };

  // Now do the actual transformations, and start with creating the step value.
  Value *Step = CreateStepValue(ID.getStep());
  if (VF.isZero() || VF.isScalar()) {
    Value *ScalarIV = CreateScalarIV(Step);
    CreateSplatIV(ScalarIV, Step);
    return;
  }

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = needsScalarInduction(EntryVal);
  if (!NeedsScalarIV) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    return;
  }

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (!shouldScalarizeInstruction(EntryVal)) {
    createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
                                    State);
    Value *ScalarIV = CreateScalarIV(Step);
    // Create scalar steps that can be used by instructions we will later
    // scalarize. Note that the addition of the scalar steps will not increase
    // the number of instructions in the loop in the common case prior to
    // InstCombine. We will be trading one vector extract for each scalar step.
    buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
    return;
  }

  // All IV users are scalar instructions, so only emit a scalar IV, not a
  // vectorized IV, except when we tail-fold, in which case the splat IV feeds
  // the predicate used by the masked loads/stores.
  Value *ScalarIV = CreateScalarIV(Step);
  if (!Cost->isScalarEpilogueAllowed())
    CreateSplatIV(ScalarIV, Step);
  buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
}
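
// For intuition about the helper below (the values are illustrative): for an
// integer induction with Step = 2, VF = 4 and StartIdx = 0, getStepVector
// produces IR along the lines of
//   %step.splat = <4 x i32> <i32 2, i32 2, i32 2, i32 2>
//   %offsets    = mul <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %step.splat
//   %induction  = add <4 x i32> %val, %offsets
// i.e. lane L of the result equals Val[L] + (StartIdx + L) * Step.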

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
  // Create and check the types.
  auto *ValVTy = cast<FixedVectorType>(Val->getType());
  int VLen = ValVTy->getNumElements();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers from zero to VF.
    for (int i = 0; i < VLen; ++i)
      Indices.push_back(ConstantInt::get(STy, StartIdx + i));

    // Add the consecutive indices to the vector value.
    Constant *Cv = ConstantVector::get(Indices);
    assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    //        flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(Cv, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating-point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers from zero to VF.
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);

  Step = Builder.CreateVectorSplat(VLen, Step);

  // Floating-point operations had to be 'fast' to enable the induction.
  FastMathFlags Flags;
  Flags.setFast();

  Value *MulOp = Builder.CreateFMul(Cv, Step);
  if (isa<Instruction>(MulOp))
    // We have to check: MulOp may be a constant.
    cast<Instruction>(MulOp)->setFastMathFlags(Flags);

  Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
  if (isa<Instruction>(BOp))
    cast<Instruction>(BOp)->setFastMathFlags(Flags);
  return BOp;
}

void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Instruction *EntryVal,
                                           const InductionDescriptor &ID,
                                           VPValue *Def, VPValue *CastDef,
                                           VPTransformState &State) {
  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF.isVector() && "VF should be greater than one");
  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy == Step->getType() &&
         "Val and Step should have the same type");

  // We build scalar steps for both integer and floating-point induction
  // variables. Here, we determine the kind of arithmetic we will perform.
  Instruction::BinaryOps AddOp;
  Instruction::BinaryOps MulOp;
  if (ScalarIVTy->isIntegerTy()) {
    AddOp = Instruction::Add;
    MulOp = Instruction::Mul;
  } else {
    AddOp = ID.getInductionOpcode();
    MulOp = Instruction::FMul;
  }

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  unsigned Lanes =
      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF)
          ? 1
          : VF.getKnownMinValue();
  assert((!VF.isScalable() || Lanes == 1) &&
         "Should never scalarize a scalable vector");
  // Compute the scalar steps and save the results in VectorLoopValueMap.
  for (unsigned Part = 0; Part < UF; ++Part) {
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      auto *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                         ScalarIVTy->getScalarSizeInBits());
      Value *StartIdx =
          createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF);
      if (ScalarIVTy->isFloatingPointTy())
        StartIdx = Builder.CreateSIToFP(StartIdx, ScalarIVTy);
      StartIdx = addFastMathFlag(Builder.CreateBinOp(
          AddOp, StartIdx, getSignedIntOrFpConstant(ScalarIVTy, Lane)));
      // The step returned by `createStepForVF` is a runtime-evaluated value
      // when VF is scalable. Otherwise, it should be folded into a Constant.
      assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
             "Expected StartIdx to be folded to a constant when VF is not "
             "scalable");
      auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
      auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
      State.set(Def, Add, VPIteration(Part, Lane));
      recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
                                            Part, Lane);
    }
  }
}

Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't widen a vector");
  assert(!V->getType()->isVoidTy() && "Type does not produce a value");

  // If we have a stride that is replaced by one, do it here. Defer this for
  // the VPlan-native path until we start running Legal checks in that path.
  if (!EnableVPlanNativePath && Legal->hasStride(V))
    V = ConstantInt::get(V->getType(), 1);

  // If we have a vector mapped to this value, return it.
  if (VectorLoopValueMap.hasVectorValue(V, Part))
    return VectorLoopValueMap.getVectorValue(V, Part);

  // If the value has not been vectorized, check if it has been scalarized
  // instead. If it has been scalarized, and we actually need the value in
  // vector form, we will construct the vector values on demand.
  if (VectorLoopValueMap.hasAnyScalarValue(V)) {
    Value *ScalarValue =
        VectorLoopValueMap.getScalarValue(V, VPIteration(Part, 0));

    // If we've scalarized a value, that value should be an instruction.
    auto *I = cast<Instruction>(V);

    // If we aren't vectorizing, we can just copy the scalar map values over
    // to the vector map.
    if (VF.isScalar()) {
      VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
      return ScalarValue;
    }

    // Get the last scalar instruction we generated for V and Part. If the
    // value is known to be uniform after vectorization, this corresponds to
    // lane zero of the Part unroll iteration. Otherwise, the last instruction
    // is the one we created for the last vector lane of the Part unroll
    // iteration.
    unsigned LastLane = Cost->isUniformAfterVectorization(I, VF)
                            ? 0
                            : VF.getKnownMinValue() - 1;
    assert((!VF.isScalable() || LastLane == 0) &&
           "Scalable vectorization can't lead to any scalarized values.");
    auto *LastInst = cast<Instruction>(
        VectorLoopValueMap.getScalarValue(V, VPIteration(Part, LastLane)));

    // Set the insert point after the last scalarized instruction. This
    // ensures the insertelement sequence will directly follow the scalar
    // definitions.
    auto OldIP = Builder.saveIP();
    auto NewIP = std::next(BasicBlock::iterator(LastInst));
    Builder.SetInsertPoint(&*NewIP);

    // However, if we are vectorizing, we need to construct the vector values.
    // If the value is known to be uniform after vectorization, we can just
    // broadcast the scalar value corresponding to lane zero for each unroll
    // iteration. Otherwise, we construct the vector values using
    // insertelement instructions. Since the resulting vectors are stored in
    // VectorLoopValueMap, we will only generate the insertelements once.
    Value *VectorValue = nullptr;
    if (Cost->isUniformAfterVectorization(I, VF)) {
      VectorValue = getBroadcastInstrs(ScalarValue);
      VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
    } else {
      // Initialize packing with insertelements to start from poison.
      assert(!VF.isScalable() && "VF is assumed to be non scalable.");
      Value *Poison = PoisonValue::get(VectorType::get(V->getType(), VF));
      VectorLoopValueMap.setVectorValue(V, Part, Poison);
      for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
        packScalarIntoVectorValue(V, VPIteration(Part, Lane));
      VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
    }
    Builder.restoreIP(OldIP);
    return VectorValue;
  }

  // If this scalar is unknown, assume that it is a constant or that it is
  // loop invariant. Broadcast V and save the value for future uses.
  Value *B = getBroadcastInstrs(V);
  VectorLoopValueMap.setVectorValue(V, Part, B);
  return B;
}

Value *
InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
                                            const VPIteration &Instance) {
  // If the value is not an instruction contained in the loop, it should
  // already be scalar.
  if (OrigLoop->isLoopInvariant(V))
    return V;

  assert(Instance.Lane > 0
             ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
             : true && "Uniform values only have lane zero");

  // If the value from the original loop has not been vectorized, it is
  // represented by UF x VF scalar values in the new loop. Return the requested
  // scalar value.
  if (VectorLoopValueMap.hasScalarValue(V, Instance))
    return VectorLoopValueMap.getScalarValue(V, Instance);

  // If the value has not been scalarized, get its entry in VectorLoopValueMap
  // for the given unroll part. If this entry is not a vector type (i.e., the
  // vectorization factor is one), there is no need to generate an
  // extractelement instruction.
  auto *U = getOrCreateVectorValue(V, Instance.Part);
  if (!U->getType()->isVectorTy()) {
    assert(VF.isScalar() && "Value not scalarized has non-vector type");
    return U;
  }

  // Otherwise, the value from the original loop has been vectorized and is
  // represented by UF vector values. Extract and return the requested scalar
  // value from the appropriate vector lane.
  return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
}
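
// For example (an informal sketch): with UF = 2 and VF = 4, a scalarized
// value %v has 2 x 4 entries in VectorLoopValueMap, addressed as
// (Part, Lane) pairs. A later request via getOrCreateVectorValue(%v, 1)
// packs lanes (1,0)..(1,3) into a <4 x ...> vector with a chain of
// insertelement instructions, and caches the result so the packing happens
// only once.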

void InnerLoopVectorizer::packScalarIntoVectorValue(
    Value *V, const VPIteration &Instance) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't pack a vector");
  assert(!V->getType()->isVoidTy() && "Type does not produce a value");

  Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
  Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
  VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
                                            Builder.getInt32(Instance.Lane));
  VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
}

void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
                                                    const VPIteration &Instance,
                                                    VPTransformState &State) {
  Value *ScalarInst = State.get(Def, Instance);
  Value *VectorValue = State.get(Def, Instance.Part);
  VectorValue = Builder.CreateInsertElement(
      VectorValue, ScalarInst, State.Builder.getInt32(Instance.Lane));
  State.set(Def, VectorValue, Instance.Part);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  assert(!VF.isScalable() && "Cannot reverse scalable vectors");
  SmallVector<int, 8> ShuffleMask;
  for (unsigned i = 0; i < VF.getKnownMinValue(); ++i)
    ShuffleMask.push_back(VF.getKnownMinValue() - i - 1);

  return Builder.CreateShuffleVector(Vec, ShuffleMask, "reverse");
}

// Return whether we allow using masked interleave-groups (for dealing with
// strided loads/stores that reside in predicated blocks, or for dealing
// with gaps).
static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
    return EnableMaskedInterleavedMemAccesses;

  return TTI.enableMaskedInterleavedAccessVectorization();
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g., translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ...           // do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>  ; R elements
//   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements
//   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ...           // do something to R, G, B
//     Pic[i]   = R; // Member of index 0
//     Pic[i+1] = G; // Member of index 1
//     Pic[i+2] = B; // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec            ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(
    const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
    VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
    VPValue *BlockInMask) {
  Instruction *Instr = Group->getInsertPos();
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getMemInstValueType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  assert(!VF.isScalable() &&
         "scalable vector reverse operation is not implemented");
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
    setDebugLocFromInst(Builder, AddrPart);

    // Note that the current instruction could be at any index in the group.
    // We need to adjust the address to that of the member of index 0.
    //
    // E.g. a = A[i+1];     // Member of index 1 (current instruction)
    //      b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a;     // Member of index 1
    //      A[i]   = b;     // Member of index 0
    //      A[i+2] = c;     // Member of index 2 (current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
    unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
    Type *PtrTy = VecTy->getPointerTo(AddressSpace);
    AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *PoisonVec = PoisonValue::get(VecTy);

  Value *MaskForGaps = nullptr;
  if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
    assert(MaskForGaps && "Mask for Gaps is required but it is null");
  }

  // Vectorize the interleaved load group.
  if (isa<LoadInst>(Instr)) {
    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      Instruction *NewLoad;
      if (BlockInMask || MaskForGaps) {
        assert(useMaskedInterleavedAccesses(*TTI) &&
               "masked interleaved groups are not allowed.");
        Value *GroupMask = MaskForGaps;
        if (BlockInMask) {
          Value *BlockInMaskPart = State.get(BlockInMask, Part);
          assert(!VF.isScalable() && "scalable vectors not yet supported.");
          Value *ShuffledMask = Builder.CreateShuffleVector(
              BlockInMaskPart,
              createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
              "interleaved.mask");
          GroupMask = MaskForGaps
                          ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
                                                MaskForGaps)
                          : ShuffledMask;
        }
        NewLoad =
            Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                     GroupMask, PoisonVec, "wide.masked.vec");
      } else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
                                            Group->getAlign(), "wide.vec");
      Group->addMetadata(NewLoad);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    unsigned J = 0;
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      assert(!VF.isScalable() && "scalable vectors not yet supported.");
      auto StrideMask =
          createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], StrideMask, "strided.vec");

        // If this member has a different type, cast the result.
        if (Member->getType() != ScalarTy) {
          assert(!VF.isScalable() && "VF is assumed to be non scalable.");
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
        }

        if (Group->isReverse())
          StridedVec = reverseVector(StridedVec);

        State.set(VPDefs[J], Member, StridedVec, Part);
      }
      ++J;
    }
    return;
  }

  // The subvector type for the current instruction.
  assert(!VF.isScalable() && "VF is assumed to be non scalable.");
  auto *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
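    // Continuing the R,G,B running example above (VF = 4, factor = 3), this
    // loop gathers the three stored member vectors, e.g. %R.vec, %G.vec and
    // %B.vec, each of type <4 x i32>; they are concatenated and interleaved
    // below before the single wide store is emitted.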
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow gaps, so each index must
      // have a member.
      assert(Group->getMember(i) &&
             "Failed to get a member from an interleaved store group");

      Value *StoredVec = State.get(StoredValues[i], Part);

      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = concatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    assert(!VF.isScalable() && "scalable vectors not yet supported.");
    Value *IVec = Builder.CreateShuffleVector(
        WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
        "interleaved.vec");

    Instruction *NewStoreInstr;
    if (BlockInMask) {
      Value *BlockInMaskPart = State.get(BlockInMask, Part);
      Value *ShuffledMask = Builder.CreateShuffleVector(
          BlockInMaskPart,
          createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
          "interleaved.mask");
      NewStoreInstr = Builder.CreateMaskedStore(
          IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
    } else
      NewStoreInstr =
          Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());

    Group->addMetadata(NewStoreInstr);
  }
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(
    Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr,
    VPValue *StoredValue, VPValue *BlockInMask) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");
  assert((!SI || StoredValue) && "No stored value provided for widened store");
  assert((!LI || !StoredValue) && "Stored value provided for widened load");

  LoopVectorizationCostModel::InstWidening Decision =
      Cost->getWideningDecision(Instr, VF);
  assert((Decision == LoopVectorizationCostModel::CM_Widen ||
          Decision == LoopVectorizationCostModel::CM_Widen_Reverse ||
          Decision == LoopVectorizationCostModel::CM_GatherScatter) &&
         "CM decision is not to widen the memory instruction");

  Type *ScalarDataTy = getMemInstValueType(Instr);

  auto *DataTy = VectorType::get(ScalarDataTy, VF);
  const Align Alignment = getLoadStoreAlignment(Instr);

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
  bool ConsecutiveStride =
      Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
  bool CreateGatherScatter =
      (Decision == LoopVectorizationCostModel::CM_GatherScatter);

  // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
  // gather/scatter. Otherwise Decision should have been to Scalarize.
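  // As an illustrative sketch of the three decisions (array names are
  // hypothetical): a load of A[i] with unit stride is widened to a single
  // <VF x ty> load (CM_Widen); a load of A[N - i] takes the reverse path
  // (CM_Widen_Reverse) and is followed by a reverse shuffle; and an indirect
  // load of A[B[i]] needs a vector of pointers feeding a masked gather
  // (CM_GatherScatter).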
2800 assert((ConsecutiveStride || CreateGatherScatter) && 2801 "The instruction should be scalarized"); 2802 (void)ConsecutiveStride; 2803 2804 VectorParts BlockInMaskParts(UF); 2805 bool isMaskRequired = BlockInMask; 2806 if (isMaskRequired) 2807 for (unsigned Part = 0; Part < UF; ++Part) 2808 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2809 2810 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2811 // Calculate the pointer for the specific unroll-part. 2812 GetElementPtrInst *PartPtr = nullptr; 2813 2814 bool InBounds = false; 2815 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2816 InBounds = gep->isInBounds(); 2817 2818 if (Reverse) { 2819 assert(!VF.isScalable() && 2820 "Reversing vectors is not yet supported for scalable vectors."); 2821 2822 // If the address is consecutive but reversed, then the 2823 // wide store needs to start at the last vector element. 2824 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2825 ScalarDataTy, Ptr, Builder.getInt32(-Part * VF.getKnownMinValue()))); 2826 PartPtr->setIsInBounds(InBounds); 2827 PartPtr = cast<GetElementPtrInst>(Builder.CreateGEP( 2828 ScalarDataTy, PartPtr, Builder.getInt32(1 - VF.getKnownMinValue()))); 2829 PartPtr->setIsInBounds(InBounds); 2830 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2831 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2832 } else { 2833 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2834 PartPtr = cast<GetElementPtrInst>( 2835 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2836 PartPtr->setIsInBounds(InBounds); 2837 } 2838 2839 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2840 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2841 }; 2842 2843 // Handle Stores: 2844 if (SI) { 2845 setDebugLocFromInst(Builder, SI); 2846 2847 for (unsigned Part = 0; Part < UF; ++Part) { 2848 Instruction *NewSI = nullptr; 2849 Value *StoredVal = State.get(StoredValue, Part); 2850 if (CreateGatherScatter) { 2851 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2852 Value *VectorGep = State.get(Addr, Part); 2853 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2854 MaskPart); 2855 } else { 2856 if (Reverse) { 2857 // If we store to reverse consecutive memory locations, then we need 2858 // to reverse the order of elements in the stored value. 2859 StoredVal = reverseVector(StoredVal); 2860 // We don't want to update the value in the map as it might be used in 2861 // another expression. So don't call resetVectorValue(StoredVal). 2862 } 2863 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2864 if (isMaskRequired) 2865 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2866 BlockInMaskParts[Part]); 2867 else 2868 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2869 } 2870 addMetadata(NewSI, SI); 2871 } 2872 return; 2873 } 2874 2875 // Handle loads. 2876 assert(LI && "Must have a load instruction"); 2877 setDebugLocFromInst(Builder, LI); 2878 for (unsigned Part = 0; Part < UF; ++Part) { 2879 Value *NewLI; 2880 if (CreateGatherScatter) { 2881 Value *MaskPart = isMaskRequired ? 
BlockInMaskParts[Part] : nullptr;
      Value *VectorGep = State.get(Addr, Part);
      NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
                                         nullptr, "wide.masked.gather");
      addMetadata(NewLI, LI);
    } else {
      auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0)));
      if (isMaskRequired)
        NewLI = Builder.CreateMaskedLoad(
            VecPtr, Alignment, BlockInMaskParts[Part], PoisonValue::get(DataTy),
            "wide.masked.load");
      else
        NewLI =
            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

      // Add metadata to the load, but setVectorValue to the reverse shuffle.
      addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = reverseVector(NewLI);
    }

    State.set(Def, Instr, NewLI, Part);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated
  // for the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
  for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
    auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
    auto InputInstance = Instance;
    if (!Operand || !OrigLoop->contains(Operand) ||
        (Cost->isUniformAfterVectorization(Operand, State.VF)))
      InputInstance.Lane = 0;
    auto *NewOp = State.get(User.getOperand(op), InputInstance);
    Cloned->setOperand(op, NewOp);
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  // TODO: Set the result for the VPValue of a VPReplicateRecipe. This requires
  // representing scalar values in VPTransformState. Add the cloned scalar to
  // the scalar map entry.
  VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
    if (II->getIntrinsicID() == Intrinsic::assume)
      AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
2965 if (!Latch) 2966 Latch = Header; 2967 2968 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2969 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2970 setDebugLocFromInst(Builder, OldInst); 2971 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2972 2973 Builder.SetInsertPoint(Latch->getTerminator()); 2974 setDebugLocFromInst(Builder, OldInst); 2975 2976 // Create i+1 and fill the PHINode. 2977 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2978 Induction->addIncoming(Start, L->getLoopPreheader()); 2979 Induction->addIncoming(Next, Latch); 2980 // Create the compare. 2981 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2982 Builder.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 2983 2984 // Now we have two terminators. Remove the old one from the block. 2985 Latch->getTerminator()->eraseFromParent(); 2986 2987 return Induction; 2988 } 2989 2990 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2991 if (TripCount) 2992 return TripCount; 2993 2994 assert(L && "Create Trip Count for null loop."); 2995 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2996 // Find the loop boundaries. 2997 ScalarEvolution *SE = PSE.getSE(); 2998 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2999 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3000 "Invalid loop count"); 3001 3002 Type *IdxTy = Legal->getWidestInductionType(); 3003 assert(IdxTy && "No type for induction"); 3004 3005 // The exit count might have the type of i64 while the phi is i32. This can 3006 // happen if we have an induction variable that is sign extended before the 3007 // compare. The only way that we get a backedge taken count is that the 3008 // induction variable was signed and as such will not overflow. In such a case 3009 // truncation is legal. 3010 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3011 IdxTy->getPrimitiveSizeInBits()) 3012 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3013 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3014 3015 // Get the total trip count from the count by adding 1. 3016 const SCEV *ExitCount = SE->getAddExpr( 3017 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3018 3019 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3020 3021 // Expand the trip count and place the new instructions in the preheader. 3022 // Notice that the pre-header does not change, only the loop body. 3023 SCEVExpander Exp(*SE, DL, "induction"); 3024 3025 // Count holds the overall loop count (N). 3026 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3027 L->getLoopPreheader()->getTerminator()); 3028 3029 if (TripCount->getType()->isPointerTy()) 3030 TripCount = 3031 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3032 L->getLoopPreheader()->getTerminator()); 3033 3034 return TripCount; 3035 } 3036 3037 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3038 if (VectorTripCount) 3039 return VectorTripCount; 3040 3041 Value *TC = getOrCreateTripCount(L); 3042 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3043 3044 Type *Ty = TC->getType(); 3045 // This is where we can make the step a runtime constant. 3046 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3047 3048 // If the tail is to be folded by masking, round the number of iterations N 3049 // up to a multiple of Step instead of rounding down. 
This is done by first 3050 // adding Step-1 and then rounding down. Note that it's ok if this addition 3051 // overflows: the vector induction variable will eventually wrap to zero given 3052 // that it starts at zero and its Step is a power of two; the loop will then 3053 // exit, with the last early-exit vector comparison also producing all-true. 3054 if (Cost->foldTailByMasking()) { 3055 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3056 "VF*UF must be a power of 2 when folding tail by masking"); 3057 assert(!VF.isScalable() && 3058 "Tail folding not yet supported for scalable vectors"); 3059 TC = Builder.CreateAdd( 3060 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3061 } 3062 3063 // Now we need to generate the expression for the part of the loop that the 3064 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3065 // iterations are not required for correctness, or N - Step, otherwise. Step 3066 // is equal to the vectorization factor (number of SIMD elements) times the 3067 // unroll factor (number of SIMD instructions). 3068 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3069 3070 // There are two cases where we need to ensure (at least) the last iteration 3071 // runs in the scalar remainder loop. Thus, if the step evenly divides 3072 // the trip count, we set the remainder to be equal to the step. If the step 3073 // does not evenly divide the trip count, no adjustment is necessary since 3074 // there will already be scalar iterations. Note that the minimum iterations 3075 // check ensures that N >= Step. The cases are: 3076 // 1) If there is a non-reversed interleaved group that may speculatively 3077 // access memory out-of-bounds. 3078 // 2) If any instruction may follow a conditionally taken exit. That is, if 3079 // the loop contains multiple exiting blocks, or a single exiting block 3080 // which is not the latch. 3081 if (VF.isVector() && Cost->requiresScalarEpilogue()) { 3082 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3083 R = Builder.CreateSelect(IsZero, Step, R); 3084 } 3085 3086 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3087 3088 return VectorTripCount; 3089 } 3090 3091 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3092 const DataLayout &DL) { 3093 // Verify that V is a vector type with same number of elements as DstVTy. 3094 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3095 unsigned VF = DstFVTy->getNumElements(); 3096 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3097 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3098 Type *SrcElemTy = SrcVecTy->getElementType(); 3099 Type *DstElemTy = DstFVTy->getElementType(); 3100 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3101 "Vector elements must have same size"); 3102 3103 // Do a direct cast if element types are castable. 3104 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3105 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3106 } 3107 // V cannot be directly casted to desired vector type. 3108 // May happen when V is a floating point vector but DstVTy is a vector of 3109 // pointers or vice-versa. Handle this using a two-step bitcast using an 3110 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 
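// As an illustrative example (assuming 64-bit pointers and VF = 4), casting a
// <4 x double> value to a <4 x i8*> type takes two steps:
//
//   %v.int = bitcast <4 x double> %v to <4 x i64>
//   %v.ptr = inttoptr <4 x i64> %v.int to <4 x i8*>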
  assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
         "Only one type should be a pointer type");
  assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
         "Only one type should be a floating point type");
  Type *IntTy =
      IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
  auto *VecIntTy = FixedVectorType::get(IntTy, VF);
  Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
  return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  // Reuse the existing vector loop preheader for the TC checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
                                          : ICmpInst::ICMP_ULT;

  // If the tail is to be folded, the vector loop takes care of all iterations.
  Value *CheckMinIters = Builder.getFalse();
  if (!Cost->foldTailByMasking()) {
    Value *Step =
        createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF);
    CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
  }
  // Create a new preheader for the vector loop.
  LoopVectorPreHeader =
      SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                               DT->getNode(Bypass)->getIDom()) &&
         "TC check is expected to dominate Bypass");

  // Update the dominator for Bypass & LoopExit.
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  // Reuse the existing vector loop preheader for the SCEV checks.
  // Note that a new preheader block is generated for the vector loop.
  BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;

  // Generate the code that checks the SCEV assumptions we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
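  // As an illustrative example (the stride name is hypothetical): if legality
  // assumed a symbolic stride %stride to be one, the expanded predicate is a
  // compare such as
  //
  //   %stride.check = icmp ne i64 %stride, 1
  //
  // and a true result takes the bypass edge to the scalar loop.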
3172 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 3173 "scev.check"); 3174 Value *SCEVCheck = Exp.expandCodeForPredicate( 3175 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator()); 3176 3177 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 3178 if (C->isZero()) 3179 return; 3180 3181 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3182 (OptForSizeBasedOnProfile && 3183 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3184 "Cannot SCEV check stride or overflow when optimizing for size"); 3185 3186 SCEVCheckBlock->setName("vector.scevcheck"); 3187 // Create new preheader for vector loop. 3188 LoopVectorPreHeader = 3189 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI, 3190 nullptr, "vector.ph"); 3191 3192 // Update dominator only if this is first RT check. 3193 if (LoopBypassBlocks.empty()) { 3194 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3195 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3196 } 3197 3198 ReplaceInstWithInst( 3199 SCEVCheckBlock->getTerminator(), 3200 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck)); 3201 LoopBypassBlocks.push_back(SCEVCheckBlock); 3202 AddedSafetyChecks = true; 3203 } 3204 3205 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 3206 // VPlan-native path does not do any analysis for runtime checks currently. 3207 if (EnableVPlanNativePath) 3208 return; 3209 3210 // Reuse existing vector loop preheader for runtime memory checks. 3211 // Note that new preheader block is generated for vector loop. 3212 BasicBlock *const MemCheckBlock = L->getLoopPreheader(); 3213 3214 // Generate the code that checks in runtime if arrays overlap. We put the 3215 // checks into a separate block to make the more common case of few elements 3216 // faster. 3217 auto *LAI = Legal->getLAI(); 3218 const auto &RtPtrChecking = *LAI->getRuntimePointerChecking(); 3219 if (!RtPtrChecking.Need) 3220 return; 3221 3222 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3223 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3224 "Cannot emit memory checks when optimizing for size, unless forced " 3225 "to vectorize."); 3226 ORE->emit([&]() { 3227 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3228 L->getStartLoc(), L->getHeader()) 3229 << "Code-size may be reduced by not forcing " 3230 "vectorization, or by source-code modifications " 3231 "eliminating the need for runtime checks " 3232 "(e.g., adding 'restrict')."; 3233 }); 3234 } 3235 3236 MemCheckBlock->setName("vector.memcheck"); 3237 // Create new preheader for vector loop. 3238 LoopVectorPreHeader = 3239 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr, 3240 "vector.ph"); 3241 3242 auto *CondBranch = cast<BranchInst>( 3243 Builder.CreateCondBr(Builder.getTrue(), Bypass, LoopVectorPreHeader)); 3244 ReplaceInstWithInst(MemCheckBlock->getTerminator(), CondBranch); 3245 LoopBypassBlocks.push_back(MemCheckBlock); 3246 AddedSafetyChecks = true; 3247 3248 // Update dominator only if this is first RT check. 
3249 if (LoopBypassBlocks.empty()) { 3250 DT->changeImmediateDominator(Bypass, MemCheckBlock); 3251 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock); 3252 } 3253 3254 Instruction *FirstCheckInst; 3255 Instruction *MemRuntimeCheck; 3256 SCEVExpander Exp(*PSE.getSE(), MemCheckBlock->getModule()->getDataLayout(), 3257 "induction"); 3258 std::tie(FirstCheckInst, MemRuntimeCheck) = addRuntimeChecks( 3259 MemCheckBlock->getTerminator(), OrigLoop, RtPtrChecking.getChecks(), Exp); 3260 assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking " 3261 "claimed checks are required"); 3262 CondBranch->setCondition(MemRuntimeCheck); 3263 3264 // We currently don't use LoopVersioning for the actual loop cloning but we 3265 // still use it to add the noalias metadata. 3266 LVer = std::make_unique<LoopVersioning>( 3267 *Legal->getLAI(), 3268 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3269 DT, PSE.getSE()); 3270 LVer->prepareNoAliasMetadata(); 3271 } 3272 3273 Value *InnerLoopVectorizer::emitTransformedIndex( 3274 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3275 const InductionDescriptor &ID) const { 3276 3277 SCEVExpander Exp(*SE, DL, "induction"); 3278 auto Step = ID.getStep(); 3279 auto StartValue = ID.getStartValue(); 3280 assert(Index->getType() == Step->getType() && 3281 "Index type does not match StepValue type"); 3282 3283 // Note: the IR at this point is broken. We cannot use SE to create any new 3284 // SCEV and then expand it, hoping that SCEV's simplification will give us 3285 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3286 // lead to various SCEV crashes. So all we can do is to use builder and rely 3287 // on InstCombine for future simplifications. Here we handle some trivial 3288 // cases only. 3289 auto CreateAdd = [&B](Value *X, Value *Y) { 3290 assert(X->getType() == Y->getType() && "Types don't match!"); 3291 if (auto *CX = dyn_cast<ConstantInt>(X)) 3292 if (CX->isZero()) 3293 return Y; 3294 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3295 if (CY->isZero()) 3296 return X; 3297 return B.CreateAdd(X, Y); 3298 }; 3299 3300 auto CreateMul = [&B](Value *X, Value *Y) { 3301 assert(X->getType() == Y->getType() && "Types don't match!"); 3302 if (auto *CX = dyn_cast<ConstantInt>(X)) 3303 if (CX->isOne()) 3304 return Y; 3305 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3306 if (CY->isOne()) 3307 return X; 3308 return B.CreateMul(X, Y); 3309 }; 3310 3311 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3312 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3313 // the DomTree is not kept up-to-date for additional blocks generated in the 3314 // vector loop. By using the header as insertion point, we guarantee that the 3315 // expanded instructions dominate all their uses. 
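  // As an illustrative summary of the cases handled below (names are
  // hypothetical): for an integer induction with start %start and step %step,
  // an index %i is transformed into %start + %i * %step; a pointer induction
  // instead emits gep %start, %i * %step; and an FP induction uses an
  // fadd/fsub with fast-math flags.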
3316 auto GetInsertPoint = [this, &B]() { 3317 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3318 if (InsertBB != LoopVectorBody && 3319 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3320 return LoopVectorBody->getTerminator(); 3321 return &*B.GetInsertPoint(); 3322 }; 3323 switch (ID.getKind()) { 3324 case InductionDescriptor::IK_IntInduction: { 3325 assert(Index->getType() == StartValue->getType() && 3326 "Index type does not match StartValue type"); 3327 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3328 return B.CreateSub(StartValue, Index); 3329 auto *Offset = CreateMul( 3330 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3331 return CreateAdd(StartValue, Offset); 3332 } 3333 case InductionDescriptor::IK_PtrInduction: { 3334 assert(isa<SCEVConstant>(Step) && 3335 "Expected constant step for pointer induction"); 3336 return B.CreateGEP( 3337 StartValue->getType()->getPointerElementType(), StartValue, 3338 CreateMul(Index, 3339 Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()))); 3340 } 3341 case InductionDescriptor::IK_FpInduction: { 3342 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3343 auto InductionBinOp = ID.getInductionBinOp(); 3344 assert(InductionBinOp && 3345 (InductionBinOp->getOpcode() == Instruction::FAdd || 3346 InductionBinOp->getOpcode() == Instruction::FSub) && 3347 "Original bin op should be defined for FP induction"); 3348 3349 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3350 3351 // Floating point operations had to be 'fast' to enable the induction. 3352 FastMathFlags Flags; 3353 Flags.setFast(); 3354 3355 Value *MulExp = B.CreateFMul(StepValue, Index); 3356 if (isa<Instruction>(MulExp)) 3357 // We have to check, the MulExp may be a constant. 3358 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 3359 3360 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3361 "induction"); 3362 if (isa<Instruction>(BOp)) 3363 cast<Instruction>(BOp)->setFastMathFlags(Flags); 3364 3365 return BOp; 3366 } 3367 case InductionDescriptor::IK_NoInduction: 3368 return nullptr; 3369 } 3370 llvm_unreachable("invalid enum"); 3371 } 3372 3373 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3374 LoopScalarBody = OrigLoop->getHeader(); 3375 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3376 LoopExitBlock = OrigLoop->getUniqueExitBlock(); 3377 assert(LoopExitBlock && "Must have an exit block"); 3378 assert(LoopVectorPreHeader && "Invalid loop structure"); 3379 3380 LoopMiddleBlock = 3381 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3382 LI, nullptr, Twine(Prefix) + "middle.block"); 3383 LoopScalarPreHeader = 3384 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3385 nullptr, Twine(Prefix) + "scalar.ph"); 3386 3387 // Set up branch from middle block to the exit and scalar preheader blocks. 3388 // completeLoopSkeleton will update the condition to use an iteration check, 3389 // if required to decide whether to execute the remainder. 
  BranchInst *BrInst =
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, Builder.getTrue());
  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
  BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // We intentionally don't let SplitBlock update LoopInfo, since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, Twine(Prefix) + "vector.body");

  // Update the dominator for the loop exit.
  DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
  return Lp;
}

void InnerLoopVectorizer::createInductionResumeValues(
    Loop *L, Value *VectorTripCount,
    std::pair<BasicBlock *, Value *> AdditionalBypass) {
  assert(VectorTripCount && L && "Expected valid arguments");
  assert(((AdditionalBypass.first && AdditionalBypass.second) ||
          (!AdditionalBypass.first && !AdditionalBypass.second)) &&
         "Inconsistent information about additional bypass.");
  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of the PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.
  for (auto &InductionEntry : Legal->getInductionVars()) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal =
        PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
                        LoopScalarPreHeader->getTerminator());
    // Copy the original phi's DebugLoc over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = VectorTripCount;
    } else {
      IRBuilder<> B(L->getLoopPreheader()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
      const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
      EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
      EndValue->setName("ind.end");

      // Compute the end value for the additional bypass (if applicable).
3461 if (AdditionalBypass.first) { 3462 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3463 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3464 StepType, true); 3465 CRD = 3466 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3467 EndValueFromAdditionalBypass = 3468 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3469 EndValueFromAdditionalBypass->setName("ind.end"); 3470 } 3471 } 3472 // The new PHI merges the original incoming value, in case of a bypass, 3473 // or the value at the end of the vectorized loop. 3474 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3475 3476 // Fix the scalar body counter (PHI node). 3477 // The old induction's phi node in the scalar body needs the truncated 3478 // value. 3479 for (BasicBlock *BB : LoopBypassBlocks) 3480 BCResumeVal->addIncoming(II.getStartValue(), BB); 3481 3482 if (AdditionalBypass.first) 3483 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3484 EndValueFromAdditionalBypass); 3485 3486 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3487 } 3488 } 3489 3490 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3491 MDNode *OrigLoopID) { 3492 assert(L && "Expected valid loop."); 3493 3494 // The trip counts should be cached by now. 3495 Value *Count = getOrCreateTripCount(L); 3496 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3497 3498 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3499 3500 // Add a check in the middle block to see if we have completed 3501 // all of the iterations in the first vector loop. 3502 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3503 // If tail is to be folded, we know we don't need to run the remainder. 3504 if (!Cost->foldTailByMasking()) { 3505 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3506 Count, VectorTripCount, "cmp.n", 3507 LoopMiddleBlock->getTerminator()); 3508 3509 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3510 // of the corresponding compare because they may have ended up with 3511 // different line numbers and we want to avoid awkward line stepping while 3512 // debugging. Eg. if the compare has got a line number inside the loop. 3513 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3514 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3515 } 3516 3517 // Get ready to start creating new instructions into the vectorized body. 3518 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3519 "Inconsistent vector loop preheader"); 3520 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3521 3522 Optional<MDNode *> VectorizedLoopID = 3523 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3524 LLVMLoopVectorizeFollowupVectorized}); 3525 if (VectorizedLoopID.hasValue()) { 3526 L->setLoopID(VectorizedLoopID.getValue()); 3527 3528 // Do not setAlreadyVectorized if loop attributes have been defined 3529 // explicitly. 3530 return LoopVectorPreHeader; 3531 } 3532 3533 // Keep all loop hints from the original loop on the vector loop (we'll 3534 // replace the vectorizer-specific hints below). 
3535 if (MDNode *LID = OrigLoop->getLoopID()) 3536 L->setLoopID(LID); 3537 3538 LoopVectorizeHints Hints(L, true, *ORE); 3539 Hints.setAlreadyVectorized(); 3540 3541 #ifdef EXPENSIVE_CHECKS 3542 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3543 LI->verify(*DT); 3544 #endif 3545 3546 return LoopVectorPreHeader; 3547 } 3548 3549 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3550 /* 3551 In this function we generate a new loop. The new loop will contain 3552 the vectorized instructions while the old loop will continue to run the 3553 scalar remainder. 3554 3555 [ ] <-- loop iteration number check. 3556 / | 3557 / v 3558 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3559 | / | 3560 | / v 3561 || [ ] <-- vector pre header. 3562 |/ | 3563 | v 3564 | [ ] \ 3565 | [ ]_| <-- vector loop. 3566 | | 3567 | v 3568 | -[ ] <--- middle-block. 3569 | / | 3570 | / v 3571 -|- >[ ] <--- new preheader. 3572 | | 3573 | v 3574 | [ ] \ 3575 | [ ]_| <-- old scalar loop to handle remainder. 3576 \ | 3577 \ v 3578 >[ ] <-- exit block. 3579 ... 3580 */ 3581 3582 // Get the metadata of the original loop before it gets modified. 3583 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3584 3585 // Create an empty vector loop, and prepare basic blocks for the runtime 3586 // checks. 3587 Loop *Lp = createVectorLoopSkeleton(""); 3588 3589 // Now, compare the new count to zero. If it is zero skip the vector loop and 3590 // jump to the scalar loop. This check also covers the case where the 3591 // backedge-taken count is uint##_max: adding one to it will overflow leading 3592 // to an incorrect trip count of zero. In this (rare) case we will also jump 3593 // to the scalar loop. 3594 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3595 3596 // Generate the code to check any assumptions that we've made for SCEV 3597 // expressions. 3598 emitSCEVChecks(Lp, LoopScalarPreHeader); 3599 3600 // Generate the code that checks in runtime if arrays overlap. We put the 3601 // checks into a separate block to make the more common case of few elements 3602 // faster. 3603 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3604 3605 // Some loops have a single integer induction variable, while other loops 3606 // don't. One example is c++ iterators that often have multiple pointer 3607 // induction variables. In the code below we also support a case where we 3608 // don't have a single induction variable. 3609 // 3610 // We try to obtain an induction variable from the original loop as hard 3611 // as possible. However if we don't find one that: 3612 // - is an integer 3613 // - counts from zero, stepping by one 3614 // - is the size of the widest induction variable type 3615 // then we create a new one. 3616 OldInduction = Legal->getPrimaryInduction(); 3617 Type *IdxTy = Legal->getWidestInductionType(); 3618 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3619 // The loop step is equal to the vectorization factor (num of SIMD elements) 3620 // times the unroll factor (num of SIMD instructions). 3621 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3622 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3623 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3624 Induction = 3625 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3626 getDebugLocFromInstOrOperands(OldInduction)); 3627 3628 // Emit phis for the new starting index of the scalar loop. 
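  // As an illustrative example (block names depend on which runtime checks
  // were emitted), the canonical IV's resume phi may look like:
  //
  //   %bc.resume.val = phi i64 [ %n.vec, %middle.block ],
  //                            [ 0, %vector.memcheck ],
  //                            [ 0, %vector.scevcheck ]
  //
  // so the scalar remainder loop resumes right after the iterations the
  // vector loop has already executed.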
3629 createInductionResumeValues(Lp, CountRoundDown); 3630 3631 return completeLoopSkeleton(Lp, OrigLoopID); 3632 } 3633 3634 // Fix up external users of the induction variable. At this point, we are 3635 // in LCSSA form, with all external PHIs that use the IV having one input value, 3636 // coming from the remainder loop. We need those PHIs to also have a correct 3637 // value for the IV when arriving directly from the middle block. 3638 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3639 const InductionDescriptor &II, 3640 Value *CountRoundDown, Value *EndValue, 3641 BasicBlock *MiddleBlock) { 3642 // There are two kinds of external IV usages - those that use the value 3643 // computed in the last iteration (the PHI) and those that use the penultimate 3644 // value (the value that feeds into the phi from the loop latch). 3645 // We allow both, but they, obviously, have different values. 3646 3647 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3648 3649 DenseMap<Value *, Value *> MissingVals; 3650 3651 // An external user of the last iteration's value should see the value that 3652 // the remainder loop uses to initialize its own IV. 3653 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3654 for (User *U : PostInc->users()) { 3655 Instruction *UI = cast<Instruction>(U); 3656 if (!OrigLoop->contains(UI)) { 3657 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3658 MissingVals[UI] = EndValue; 3659 } 3660 } 3661 3662 // An external user of the penultimate value need to see EndValue - Step. 3663 // The simplest way to get this is to recompute it from the constituent SCEVs, 3664 // that is Start + (Step * (CRD - 1)). 3665 for (User *U : OrigPhi->users()) { 3666 auto *UI = cast<Instruction>(U); 3667 if (!OrigLoop->contains(UI)) { 3668 const DataLayout &DL = 3669 OrigLoop->getHeader()->getModule()->getDataLayout(); 3670 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3671 3672 IRBuilder<> B(MiddleBlock->getTerminator()); 3673 Value *CountMinusOne = B.CreateSub( 3674 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3675 Value *CMO = 3676 !II.getStep()->getType()->isIntegerTy() 3677 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3678 II.getStep()->getType()) 3679 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3680 CMO->setName("cast.cmo"); 3681 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3682 Escape->setName("ind.escape"); 3683 MissingVals[UI] = Escape; 3684 } 3685 } 3686 3687 for (auto &I : MissingVals) { 3688 PHINode *PHI = cast<PHINode>(I.first); 3689 // One corner case we have to handle is two IVs "chasing" each-other, 3690 // that is %IV2 = phi [...], [ %IV1, %latch ] 3691 // In this case, if IV1 has an external use, we need to avoid adding both 3692 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3693 // don't already have an incoming value for the middle block. 
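  // As an illustrative sketch of the "chasing" case (names are hypothetical):
  //
  //   %IV1 = phi i64 [ 0, %ph ], [ %IV1.next, %latch ]
  //   %IV2 = phi i64 [ 0, %ph ], [ %IV1, %latch ]
  //
  // An exit phi using %IV1 is simultaneously the last value of %IV2's
  // recurrence and the penultimate value of %IV1, so it must receive only one
  // incoming value for the middle block.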
3694 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3695 PHI->addIncoming(I.second, MiddleBlock); 3696 } 3697 } 3698 3699 namespace { 3700 3701 struct CSEDenseMapInfo { 3702 static bool canHandle(const Instruction *I) { 3703 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3704 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3705 } 3706 3707 static inline Instruction *getEmptyKey() { 3708 return DenseMapInfo<Instruction *>::getEmptyKey(); 3709 } 3710 3711 static inline Instruction *getTombstoneKey() { 3712 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3713 } 3714 3715 static unsigned getHashValue(const Instruction *I) { 3716 assert(canHandle(I) && "Unknown instruction!"); 3717 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3718 I->value_op_end())); 3719 } 3720 3721 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3722 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3723 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3724 return LHS == RHS; 3725 return LHS->isIdenticalTo(RHS); 3726 } 3727 }; 3728 3729 } // end anonymous namespace 3730 3731 ///Perform cse of induction variable instructions. 3732 static void cse(BasicBlock *BB) { 3733 // Perform simple cse. 3734 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3735 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3736 Instruction *In = &*I++; 3737 3738 if (!CSEDenseMapInfo::canHandle(In)) 3739 continue; 3740 3741 // Check if we can replace this instruction with any of the 3742 // visited instructions. 3743 if (Instruction *V = CSEMap.lookup(In)) { 3744 In->replaceAllUsesWith(V); 3745 In->eraseFromParent(); 3746 continue; 3747 } 3748 3749 CSEMap[In] = In; 3750 } 3751 } 3752 3753 InstructionCost 3754 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3755 bool &NeedToScalarize) { 3756 assert(!VF.isScalable() && "scalable vectors not yet supported."); 3757 Function *F = CI->getCalledFunction(); 3758 Type *ScalarRetTy = CI->getType(); 3759 SmallVector<Type *, 4> Tys, ScalarTys; 3760 for (auto &ArgOp : CI->arg_operands()) 3761 ScalarTys.push_back(ArgOp->getType()); 3762 3763 // Estimate cost of scalarized vector call. The source operands are assumed 3764 // to be vectors, so we need to extract individual elements from there, 3765 // execute VF scalar calls, and then gather the result into the vector return 3766 // value. 3767 InstructionCost ScalarCallCost = 3768 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3769 if (VF.isScalar()) 3770 return ScalarCallCost; 3771 3772 // Compute corresponding vector type for return value and arguments. 3773 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3774 for (Type *ScalarTy : ScalarTys) 3775 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3776 3777 // Compute costs of unpacking argument values for the scalar calls and 3778 // packing the return values to a vector. 3779 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3780 3781 InstructionCost Cost = 3782 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3783 3784 // If we can't emit a vector call for this function, then the currently found 3785 // cost is the cost we need to return. 
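// As an illustrative calculation (the costs are made up): with VF = 4, a
// scalar call cost of 10 and a scalarization overhead of 8, the scalarized
// cost is 4 * 10 + 8 = 48; a vector library call costing, say, 20 would then
// win the comparison below and clear NeedToScalarize.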
  NeedToScalarize = true;
  VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
  Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);

  if (!TLI || CI->isNoBuiltin() || !VecFunc)
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  InstructionCost VectorCallCost =
      TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    Cost = VectorCallCost;
  }
  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
                                                   ElementCount VF) {
  auto MaybeVectorizeType = [](Type *Elt, ElementCount VF) -> Type * {
    if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
      return Elt;
    return VectorType::get(Elt, VF);
  };

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");
  Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
  FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
  SmallVector<Type *> ParamTys;
  // Use a back inserter; transforming into ParamTys.begin() of an empty
  // vector would write through an invalid iterator.
  std::transform(FTy->param_begin(), FTy->param_end(),
                 std::back_inserter(ParamTys),
                 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });

  IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
                                    dyn_cast<IntrinsicInst>(CI));
  return TTI.getIntrinsicInstrCost(CostAttrs,
                                   TargetTransformInfo::TCK_RecipThroughput);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
  auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from VectorLoopValueMap indicates that it
    // wasn't vectorized.
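    // As an illustrative example (VF = 4, minimal bit width of 8): an i32 add
    // whose result is known to fit in 8 bits is rewritten as
    //
    //   %t0 = trunc <4 x i32> %a to <4 x i8>
    //   %t1 = trunc <4 x i32> %b to <4 x i8>
    //   %r  = add <4 x i8> %t0, %t1
    //   %r.ext = zext <4 x i8> %r to <4 x i32>
    //
    // with the ext/trunc pairs expected to be cleaned up later.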
3852 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3853 continue; 3854 for (unsigned Part = 0; Part < UF; ++Part) { 3855 Value *I = getOrCreateVectorValue(KV.first, Part); 3856 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3857 continue; 3858 Type *OriginalTy = I->getType(); 3859 Type *ScalarTruncatedTy = 3860 IntegerType::get(OriginalTy->getContext(), KV.second); 3861 auto *TruncatedTy = FixedVectorType::get( 3862 ScalarTruncatedTy, 3863 cast<FixedVectorType>(OriginalTy)->getNumElements()); 3864 if (TruncatedTy == OriginalTy) 3865 continue; 3866 3867 IRBuilder<> B(cast<Instruction>(I)); 3868 auto ShrinkOperand = [&](Value *V) -> Value * { 3869 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3870 if (ZI->getSrcTy() == TruncatedTy) 3871 return ZI->getOperand(0); 3872 return B.CreateZExtOrTrunc(V, TruncatedTy); 3873 }; 3874 3875 // The actual instruction modification depends on the instruction type, 3876 // unfortunately. 3877 Value *NewI = nullptr; 3878 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3879 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3880 ShrinkOperand(BO->getOperand(1))); 3881 3882 // Any wrapping introduced by shrinking this operation shouldn't be 3883 // considered undefined behavior. So, we can't unconditionally copy 3884 // arithmetic wrapping flags to NewI. 3885 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3886 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3887 NewI = 3888 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3889 ShrinkOperand(CI->getOperand(1))); 3890 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3891 NewI = B.CreateSelect(SI->getCondition(), 3892 ShrinkOperand(SI->getTrueValue()), 3893 ShrinkOperand(SI->getFalseValue())); 3894 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3895 switch (CI->getOpcode()) { 3896 default: 3897 llvm_unreachable("Unhandled cast!"); 3898 case Instruction::Trunc: 3899 NewI = ShrinkOperand(CI->getOperand(0)); 3900 break; 3901 case Instruction::SExt: 3902 NewI = B.CreateSExtOrTrunc( 3903 CI->getOperand(0), 3904 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3905 break; 3906 case Instruction::ZExt: 3907 NewI = B.CreateZExtOrTrunc( 3908 CI->getOperand(0), 3909 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3910 break; 3911 } 3912 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3913 auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType()) 3914 ->getNumElements(); 3915 auto *O0 = B.CreateZExtOrTrunc( 3916 SI->getOperand(0), 3917 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3918 auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType()) 3919 ->getNumElements(); 3920 auto *O1 = B.CreateZExtOrTrunc( 3921 SI->getOperand(1), 3922 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3923 3924 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3925 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3926 // Don't do anything with the operands, just extend the result. 
3927 continue; 3928 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3929 auto Elements = cast<FixedVectorType>(IE->getOperand(0)->getType()) 3930 ->getNumElements(); 3931 auto *O0 = B.CreateZExtOrTrunc( 3932 IE->getOperand(0), 3933 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3934 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3935 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3936 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3937 auto Elements = cast<FixedVectorType>(EE->getOperand(0)->getType()) 3938 ->getNumElements(); 3939 auto *O0 = B.CreateZExtOrTrunc( 3940 EE->getOperand(0), 3941 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3942 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3943 } else { 3944 // If we don't know what to do, be conservative and don't do anything. 3945 continue; 3946 } 3947 3948 // Lastly, extend the result. 3949 NewI->takeName(cast<Instruction>(I)); 3950 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3951 I->replaceAllUsesWith(Res); 3952 cast<Instruction>(I)->eraseFromParent(); 3953 Erased.insert(I); 3954 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3955 } 3956 } 3957 3958 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3959 for (const auto &KV : Cost->getMinimalBitwidths()) { 3960 // If the value wasn't vectorized, we must maintain the original scalar 3961 // type. The absence of the value from VectorLoopValueMap indicates that it 3962 // wasn't vectorized. 3963 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3964 continue; 3965 for (unsigned Part = 0; Part < UF; ++Part) { 3966 Value *I = getOrCreateVectorValue(KV.first, Part); 3967 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3968 if (Inst && Inst->use_empty()) { 3969 Value *NewI = Inst->getOperand(0); 3970 Inst->eraseFromParent(); 3971 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3972 } 3973 } 3974 } 3975 } 3976 3977 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 3978 // Insert truncates and extends for any truncated instructions as hints to 3979 // InstCombine. 3980 if (VF.isVector()) 3981 truncateToMinimalBitwidths(); 3982 3983 // Fix widened non-induction PHIs by setting up the PHI operands. 3984 if (OrigPHIsToFix.size()) { 3985 assert(EnableVPlanNativePath && 3986 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3987 fixNonInductionPHIs(State); 3988 } 3989 3990 // At this point every instruction in the original loop is widened to a 3991 // vector form. Now we need to fix the recurrences in the loop. These PHI 3992 // nodes are currently empty because we did not want to introduce cycles. 3993 // This is the second stage of vectorizing recurrences. 3994 fixCrossIterationPHIs(State); 3995 3996 // Forget the original basic block. 3997 PSE.getSE()->forgetLoop(OrigLoop); 3998 3999 // Fix-up external users of the induction variables. 4000 for (auto &Entry : Legal->getInductionVars()) 4001 fixupIVUsers(Entry.first, Entry.second, 4002 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4003 IVEndValues[Entry.first], LoopMiddleBlock); 4004 4005 fixLCSSAPHIs(State); 4006 for (Instruction *PI : PredicatedInstructions) 4007 sinkScalarOperands(&*PI); 4008 4009 // Remove redundant induction instructions. 4010 cse(LoopVectorBody); 4011 4012 // Set/update profile weights for the vector and remainder loops as original 4013 // loop iterations are now distributed among them. 
Note that the original loop
4014 //   represented by LoopScalarBody becomes the remainder loop after vectorization.
4015 //
4016 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
4017 // end up getting slightly inaccurate results, but that should be OK since
4018 // the profile is not inherently precise anyway. Note also that a possible
4019 // bypass of the vector code caused by legality checks is ignored,
4020 // optimistically assigning all the weight to the vector loop.
4021 //
4022 // For scalable vectorization we can't know at compile time how many iterations
4023 // of the loop are handled in one vector iteration, so instead assume a pessimistic
4024 // vscale of '1'.
4025   setProfileInfoAfterUnrolling(
4026       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4027       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4028 }
4029
4030 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4031   // In order to support recurrences we need to be able to vectorize Phi nodes.
4032   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4033   // stage #2: We now need to fix the recurrences by adding incoming edges to
4034   // the currently empty PHI nodes. At this point every instruction in the
4035   // original loop is widened to a vector form so we can use them to construct
4036   // the incoming edges.
4037   for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
4038     // Handle first-order recurrences and reductions that need to be fixed.
4039     if (Legal->isFirstOrderRecurrence(&Phi))
4040       fixFirstOrderRecurrence(&Phi, State);
4041     else if (Legal->isReductionVariable(&Phi))
4042       fixReduction(&Phi, State);
4043   }
4044 }
4045
4046 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi,
4047                                                   VPTransformState &State) {
4048   // This is the second phase of vectorizing first-order recurrences. An
4049   // overview of the transformation is described below. Suppose we have the
4050   // following loop.
4051   //
4052   //   for (int i = 0; i < n; ++i)
4053   //     b[i] = a[i] - a[i - 1];
4054   //
4055   // There is a first-order recurrence on "a". For this loop, the shorthand
4056   // scalar IR looks like:
4057   //
4058   //   scalar.ph:
4059   //     s_init = a[-1]
4060   //     br scalar.body
4061   //
4062   //   scalar.body:
4063   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4064   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4065   //     s2 = a[i]
4066   //     b[i] = s2 - s1
4067   //     br cond, scalar.body, ...
4068   //
4069   // In this example, s1 is a recurrence because its value depends on the
4070   // previous iteration. In the first phase of vectorization, we created a
4071   // temporary value for s1. We now complete the vectorization and produce the
4072   // shorthand vector IR shown below (for VF = 4, UF = 1).
4073   //
4074   //   vector.ph:
4075   //     v_init = vector(..., ..., ..., a[-1])
4076   //     br vector.body
4077   //
4078   //   vector.body
4079   //     i = phi [0, vector.ph], [i+4, vector.body]
4080   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4081   //     v2 = a[i, i+1, i+2, i+3];
4082   //     v3 = vector(v1(3), v2(0, 1, 2))
4083   //     b[i, i+1, i+2, i+3] = v2 - v3
4084   //     br cond, vector.body, middle.block
4085   //
4086   //   middle.block:
4087   //     x = v2(3)
4088   //     br scalar.ph
4089   //
4090   //   scalar.ph:
4091   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4092   //     br scalar.body
4093   //
4094   // After the vector loop completes execution, we extract the next value of
4095   // the recurrence (x) to use as the initial value in the scalar loop.
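  //
  // For illustration only, the same splice is applied once per unrolled part
  // when UF > 1; the following is a sketch in the same shorthand (assuming
  // VF = 4, UF = 2), not IR generated verbatim:
  //
  //     v3.0 = vector(v1(3),   v2.0(0, 1, 2))    ; part 0 splices the phi
  //     v3.1 = vector(v2.0(3), v2.1(0, 1, 2))    ; part 1 splices part 0
  //
  // The vector phi's incoming value from the latch is then the last part
  // (v2.1), mirroring how Incoming is chained through the parts below.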
4096 4097 // Get the original loop preheader and single loop latch. 4098 auto *Preheader = OrigLoop->getLoopPreheader(); 4099 auto *Latch = OrigLoop->getLoopLatch(); 4100 4101 // Get the initial and previous values of the scalar recurrence. 4102 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 4103 auto *Previous = Phi->getIncomingValueForBlock(Latch); 4104 4105 // Create a vector from the initial value. 4106 auto *VectorInit = ScalarInit; 4107 if (VF.isVector()) { 4108 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4109 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 4110 VectorInit = Builder.CreateInsertElement( 4111 PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 4112 Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init"); 4113 } 4114 4115 VPValue *PhiDef = State.Plan->getVPValue(Phi); 4116 VPValue *PreviousDef = State.Plan->getVPValue(Previous); 4117 // We constructed a temporary phi node in the first phase of vectorization. 4118 // This phi node will eventually be deleted. 4119 Builder.SetInsertPoint(cast<Instruction>(State.get(PhiDef, 0))); 4120 4121 // Create a phi node for the new recurrence. The current value will either be 4122 // the initial value inserted into a vector or loop-varying vector value. 4123 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 4124 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 4125 4126 // Get the vectorized previous value of the last part UF - 1. It appears last 4127 // among all unrolled iterations, due to the order of their construction. 4128 Value *PreviousLastPart = State.get(PreviousDef, UF - 1); 4129 4130 // Find and set the insertion point after the previous value if it is an 4131 // instruction. 4132 BasicBlock::iterator InsertPt; 4133 // Note that the previous value may have been constant-folded so it is not 4134 // guaranteed to be an instruction in the vector loop. 4135 // FIXME: Loop invariant values do not form recurrences. We should deal with 4136 // them earlier. 4137 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 4138 InsertPt = LoopVectorBody->getFirstInsertionPt(); 4139 else { 4140 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 4141 if (isa<PHINode>(PreviousLastPart)) 4142 // If the previous value is a phi node, we should insert after all the phi 4143 // nodes in the block containing the PHI to avoid breaking basic block 4144 // verification. Note that the basic block may be different to 4145 // LoopVectorBody, in case we predicate the loop. 4146 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 4147 else 4148 InsertPt = ++PreviousInst->getIterator(); 4149 } 4150 Builder.SetInsertPoint(&*InsertPt); 4151 4152 // We will construct a vector for the recurrence by combining the values for 4153 // the current and previous iterations. This is the required shuffle mask. 4154 assert(!VF.isScalable()); 4155 SmallVector<int, 8> ShuffleMask(VF.getKnownMinValue()); 4156 ShuffleMask[0] = VF.getKnownMinValue() - 1; 4157 for (unsigned I = 1; I < VF.getKnownMinValue(); ++I) 4158 ShuffleMask[I] = I + VF.getKnownMinValue() - 1; 4159 4160 // The vector from which to take the initial value for the current iteration 4161 // (actual or unrolled). Initially, this is the vector phi node. 4162 Value *Incoming = VecPhi; 4163 4164 // Shuffle the current and previous vector and update the vector parts. 
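  // For example (illustrative): with VF = 4 the mask built above is
  // <3, 4, 5, 6>. Lane 0 takes element 3 of the first shuffle operand
  // (Incoming, the scalar carried over from the previous iteration), and
  // lanes 1-3 take elements 0-2 of the second operand, since mask indices
  // in [VF, 2*VF) select from the second vector.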
4165   for (unsigned Part = 0; Part < UF; ++Part) {
4166     Value *PreviousPart = State.get(PreviousDef, Part);
4167     Value *PhiPart = State.get(PhiDef, Part);
4168     auto *Shuffle =
4169         VF.isVector()
4170             ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
4171             : Incoming;
4172     PhiPart->replaceAllUsesWith(Shuffle);
4173     cast<Instruction>(PhiPart)->eraseFromParent();
4174     State.reset(PhiDef, Phi, Shuffle, Part);
4175     Incoming = PreviousPart;
4176   }
4177
4178   // Fix the latch value of the new recurrence in the vector loop.
4179   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4180
4181   // Extract the last vector element in the middle block. This will be the
4182   // initial value for the recurrence when jumping to the scalar loop.
4183   auto *ExtractForScalar = Incoming;
4184   if (VF.isVector()) {
4185     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4186     ExtractForScalar = Builder.CreateExtractElement(
4187         ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
4188         "vector.recur.extract");
4189   }
4190   // Extract the second-to-last element in the middle block if the
4191   // Phi is used outside the loop. We need to extract the phi itself
4192   // and not the last element (the phi update in the current iteration). This
4193   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4194   // when the scalar loop is not run at all.
4195   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4196   if (VF.isVector())
4197     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4198         Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
4199         "vector.recur.extract.for.phi");
4200   // When the loop is unrolled without vectorizing, initialize
4201   // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
4202   // `Incoming`. This is analogous to the vectorized case above: extracting the
4203   // second-to-last element when VF > 1.
4204   else if (UF > 1)
4205     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4206
4207   // Fix the initial value of the original recurrence in the scalar loop.
4208   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4209   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4210   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4211     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4212     Start->addIncoming(Incoming, BB);
4213   }
4214
4215   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4216   Phi->setName("scalar.recur");
4217
4218   // Finally, fix users of the recurrence outside the loop. The users will need
4219   // either the last value of the scalar recurrence or the last value of the
4220   // vector recurrence we extracted in the middle block. Since the loop is in
4221   // LCSSA form, we just need to find all the phi nodes for the original scalar
4222   // recurrence in the exit block, and then add an edge for the middle block.
4223   // Note that LCSSA does not imply single entry when the original scalar loop
4224   // had multiple exiting edges (as we always run the last iteration in the
4225   // scalar epilogue); in that case, the exiting path through middle will be
4226   // dynamically dead and the value picked for the phi doesn't matter.
4227   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4228     if (any_of(LCSSAPhi.incoming_values(),
4229                [Phi](Value *V) { return V == Phi; }))
4230       LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4231 }
4232
4233 void InnerLoopVectorizer::fixReduction(PHINode *Phi, VPTransformState &State) {
4234   // Get its reduction variable descriptor.
4235   assert(Legal->isReductionVariable(Phi) &&
4236          "Unable to find the reduction variable");
4237   RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
4238
4239   RecurKind RK = RdxDesc.getRecurrenceKind();
4240   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4241   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4242   setDebugLocFromInst(Builder, ReductionStartValue);
4243   bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
4244
4245   VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst);
4246   // This is the vector-clone of the value that leaves the loop.
4247   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4248
4249   // Wrap flags are in general invalid after vectorization, clear them.
4250   clearReductionWrapFlags(RdxDesc);
4251
4252   // Fix the vector-loop phi.
4253
4254   // Reductions do not have to start at zero. They can start with
4255   // any loop invariant values.
4256   BasicBlock *Latch = OrigLoop->getLoopLatch();
4257   Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
4258
4259   for (unsigned Part = 0; Part < UF; ++Part) {
4260     Value *VecRdxPhi = State.get(State.Plan->getVPValue(Phi), Part);
4261     Value *Val = State.get(State.Plan->getVPValue(LoopVal), Part);
4262     cast<PHINode>(VecRdxPhi)
4263         ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4264   }
4265
4266   // Before each round, move the insertion point right between
4267   // the PHIs and the values we are going to write.
4268   // This allows us to write both PHINodes and the extractelement
4269   // instructions.
4270   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4271
4272   setDebugLocFromInst(Builder, LoopExitInst);
4273
4274   // If the tail is folded by masking, the vector value to leave the loop
4275   // should be a Select choosing between the vectorized LoopExitInst and the
4276   // vectorized Phi, rather than the LoopExitInst alone. For an inloop
4277   // reduction the reduction will already be predicated, and does not need to
4278   // be handled here.
4278   if (Cost->foldTailByMasking() && !IsInLoopReductionPhi) {
4279     for (unsigned Part = 0; Part < UF; ++Part) {
4280       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4281       Value *Sel = nullptr;
4282       for (User *U : VecLoopExitInst->users()) {
4283         if (isa<SelectInst>(U)) {
4284           assert(!Sel && "Reduction exit feeding two selects");
4285           Sel = U;
4286         } else
4287           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4288       }
4289       assert(Sel && "Reduction exit feeds no select");
4290       State.reset(LoopExitInstDef, LoopExitInst, Sel, Part);
4291
4292       // If the target can create a predicated operator for the reduction at no
4293       // extra cost in the loop (for example a predicated vadd), it can be
4294       // cheaper for the select to remain in the loop than be sunk out of it,
4295       // and so use the select value for the phi instead of the old
4296       // LoopExitValue.
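      //
      // Illustrative shorthand, assuming a target with a predicated add (the
      // "vadd.pred" spelling below is made up for this sketch):
      //
      //   loop:                                loop:
      //     %phi = phi [0, ph], [%sel, loop]     %phi = phi [0, ph], [%sel, loop]
      //     %add = add %phi, %x            =>    %sel = vadd.pred %phi, %x, %mask
      //     %sel = select %mask, %add, %phi
      //
      // Keeping the select in the loop lets it fold into the predicated
      // operation, which is why the phi is rewired to the select here.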
4297 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 4298 if (PreferPredicatedReductionSelect || 4299 TTI->preferPredicatedReductionSelect( 4300 RdxDesc.getOpcode(), Phi->getType(), 4301 TargetTransformInfo::ReductionFlags())) { 4302 auto *VecRdxPhi = 4303 cast<PHINode>(State.get(State.Plan->getVPValue(Phi), Part)); 4304 VecRdxPhi->setIncomingValueForBlock( 4305 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4306 } 4307 } 4308 } 4309 4310 // If the vector reduction can be performed in a smaller type, we truncate 4311 // then extend the loop exit value to enable InstCombine to evaluate the 4312 // entire expression in the smaller type. 4313 if (VF.isVector() && Phi->getType() != RdxDesc.getRecurrenceType()) { 4314 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 4315 assert(!VF.isScalable() && "scalable vectors not yet supported."); 4316 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4317 Builder.SetInsertPoint( 4318 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4319 VectorParts RdxParts(UF); 4320 for (unsigned Part = 0; Part < UF; ++Part) { 4321 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4322 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4323 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4324 : Builder.CreateZExt(Trunc, VecTy); 4325 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4326 UI != RdxParts[Part]->user_end();) 4327 if (*UI != Trunc) { 4328 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4329 RdxParts[Part] = Extnd; 4330 } else { 4331 ++UI; 4332 } 4333 } 4334 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4335 for (unsigned Part = 0; Part < UF; ++Part) { 4336 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4337 State.reset(LoopExitInstDef, LoopExitInst, RdxParts[Part], Part); 4338 } 4339 } 4340 4341 // Reduce all of the unrolled parts into a single vector. 4342 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4343 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4344 4345 // The middle block terminator has already been assigned a DebugLoc here (the 4346 // OrigLoop's single latch terminator). We want the whole middle block to 4347 // appear to execute on this line because: (a) it is all compiler generated, 4348 // (b) these instructions are always executed after evaluating the latch 4349 // conditional branch, and (c) other passes may add new predecessors which 4350 // terminate on this line. This is the easiest way to ensure we don't 4351 // accidentally cause an extra step back into the loop while debugging. 4352 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 4353 { 4354 // Floating-point operations should have some FMF to enable the reduction. 4355 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4356 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4357 for (unsigned Part = 1; Part < UF; ++Part) { 4358 Value *RdxPart = State.get(LoopExitInstDef, Part); 4359 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4360 ReducedPartRdx = Builder.CreateBinOp( 4361 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4362 } else { 4363 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4364 } 4365 } 4366 } 4367 4368 // Create the reduction after the loop. Note that inloop reductions create the 4369 // target reduction in the loop using a Reduction recipe. 
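  // Illustrative shorthand for an integer add reduction with VF = 4 and
  // UF = 2: the part-combining loop above emits
  //
  //   %bin.rdx = add <4 x i32> %rdx.part1, %rdx.part0
  //
  // and createTargetReduction below then reduces the final vector to a
  // scalar, typically via an @llvm.vector.reduce.add.v4i32 call (the value
  // names here are assumptions for the sketch).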
4370   if (VF.isVector() && !IsInLoopReductionPhi) {
4371     ReducedPartRdx =
4372         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx);
4373     // If the reduction can be performed in a smaller type, we need to extend
4374     // the reduction to the wider type before we branch to the original loop.
4375     if (Phi->getType() != RdxDesc.getRecurrenceType())
4376       ReducedPartRdx =
4377           RdxDesc.isSigned()
4378               ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4379               : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4380   }
4381
4382   // Create a phi node that merges control-flow from the backedge-taken check
4383   // block and the middle block.
4384   PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4385                                         LoopScalarPreHeader->getTerminator());
4386   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4387     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4388   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4389
4390   // Now, we need to fix the users of the reduction variable
4391   // inside and outside of the scalar remainder loop.
4392
4393   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4394   // in the exit blocks. See comment on analogous loop in
4395   // fixFirstOrderRecurrence for a more complete explanation of the logic.
4396   for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4397     if (any_of(LCSSAPhi.incoming_values(),
4398                [LoopExitInst](Value *V) { return V == LoopExitInst; }))
4399       LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4400
4401   // Fix the scalar loop reduction variable with the incoming reduction sum
4402   // from the vector body and from the backedge value.
4403   int IncomingEdgeBlockIdx =
4404       Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4405   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4406   // Pick the other block.
4407   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4408   Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4409   Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4410 }
4411
4412 void InnerLoopVectorizer::clearReductionWrapFlags(
4413     RecurrenceDescriptor &RdxDesc) {
4414   RecurKind RK = RdxDesc.getRecurrenceKind();
4415   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4416     return;
4417
4418   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4419   assert(LoopExitInstr && "null loop exit instruction");
4420   SmallVector<Instruction *, 8> Worklist;
4421   SmallPtrSet<Instruction *, 8> Visited;
4422   Worklist.push_back(LoopExitInstr);
4423   Visited.insert(LoopExitInstr);
4424
4425   while (!Worklist.empty()) {
4426     Instruction *Cur = Worklist.pop_back_val();
4427     if (isa<OverflowingBinaryOperator>(Cur))
4428       for (unsigned Part = 0; Part < UF; ++Part) {
4429         Value *V = getOrCreateVectorValue(Cur, Part);
4430         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4431       }
4432
4433     for (User *U : Cur->users()) {
4434       Instruction *UI = cast<Instruction>(U);
4435       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4436           Visited.insert(UI).second)
4437         Worklist.push_back(UI);
4438     }
4439   }
4440 }
4441
4442 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4443   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4444     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4445       // Some phis were already hand-updated by the reduction and recurrence
4446       // code above, leave them alone.
4447       continue;
4448
4449     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4450     // Non-instruction incoming values will have only one value.
4451     unsigned LastLane = 0;
4452     if (isa<Instruction>(IncomingValue))
4453       LastLane = Cost->isUniformAfterVectorization(
4454                      cast<Instruction>(IncomingValue), VF)
4455                      ? 0
4456                      : VF.getKnownMinValue() - 1;
4457     assert((!VF.isScalable() || LastLane == 0) &&
4458            "scalable vectors don't support non-uniform scalars yet");
4459     // Can be a loop invariant incoming value or the last scalar value to be
4460     // extracted from the vectorized loop.
4461     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4462     Value *lastIncomingValue =
4463         OrigLoop->isLoopInvariant(IncomingValue)
4464             ? IncomingValue
4465             : State.get(State.Plan->getVPValue(IncomingValue),
4466                         VPIteration(UF - 1, LastLane));
4467     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4468   }
4469 }
4470
4471 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4472   // The basic block and loop containing the predicated instruction.
4473   auto *PredBB = PredInst->getParent();
4474   auto *VectorLoop = LI->getLoopFor(PredBB);
4475
4476   // Initialize a worklist with the operands of the predicated instruction.
4477   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4478
4479   // Holds instructions that we need to analyze again. An instruction may be
4480   // reanalyzed if we don't yet know if we can sink it or not.
4481   SmallVector<Instruction *, 8> InstsToReanalyze;
4482
4483   // Returns true if a given use occurs in the predicated block. Phi nodes use
4484   // their operands in their corresponding predecessor blocks.
4485   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4486     auto *I = cast<Instruction>(U.getUser());
4487     BasicBlock *BB = I->getParent();
4488     if (auto *Phi = dyn_cast<PHINode>(I))
4489       BB = Phi->getIncomingBlock(
4490           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4491     return BB == PredBB;
4492   };
4493
4494   // Iteratively sink the scalarized operands of the predicated instruction
4495   // into the block we created for it. When an instruction is sunk, its
4496   // operands are then added to the worklist. The algorithm ends when one pass
4497   // through the worklist does not sink a single instruction.
4498   bool Changed;
4499   do {
4500     // Add the instructions that need to be reanalyzed to the worklist, and
4501     // reset the changed indicator.
4502     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4503     InstsToReanalyze.clear();
4504     Changed = false;
4505
4506     while (!Worklist.empty()) {
4507       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4508
4509       // We can't sink an instruction if it is a phi node, is already in the
4510       // predicated block, is not in the loop, or may have side effects.
4511       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4512           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4513         continue;
4514
4515       // It's legal to sink the instruction if all its uses occur in the
4516       // predicated block. Otherwise, there's nothing to do yet, and we may
4517       // need to reanalyze the instruction.
4518       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4519         InstsToReanalyze.push_back(I);
4520         continue;
4521       }
4522
4523       // Move the instruction to the beginning of the predicated block, and add
4524       // its operands to the worklist.
4525       I->moveBefore(&*PredBB->getFirstInsertionPt());
4526       Worklist.insert(I->op_begin(), I->op_end());
4527
4528       // The sinking may have enabled other instructions to be sunk, so we will
4529       // need to iterate.
4530 Changed = true; 4531 } 4532 } while (Changed); 4533 } 4534 4535 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4536 for (PHINode *OrigPhi : OrigPHIsToFix) { 4537 PHINode *NewPhi = 4538 cast<PHINode>(State.get(State.Plan->getVPValue(OrigPhi), 0)); 4539 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 4540 4541 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 4542 predecessors(OrigPhi->getParent())); 4543 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 4544 predecessors(NewPhi->getParent())); 4545 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 4546 "Scalar and Vector BB should have the same number of predecessors"); 4547 4548 // The insertion point in Builder may be invalidated by the time we get 4549 // here. Force the Builder insertion point to something valid so that we do 4550 // not run into issues during insertion point restore in 4551 // getOrCreateVectorValue calls below. 4552 Builder.SetInsertPoint(NewPhi); 4553 4554 // The predecessor order is preserved and we can rely on mapping between 4555 // scalar and vector block predecessors. 4556 for (unsigned i = 0; i < NumIncomingValues; ++i) { 4557 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 4558 4559 // When looking up the new scalar/vector values to fix up, use incoming 4560 // values from original phi. 4561 Value *ScIncV = 4562 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 4563 4564 // Scalar incoming value may need a broadcast 4565 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 4566 NewPhi->addIncoming(NewIncV, NewPredBB); 4567 } 4568 } 4569 } 4570 4571 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4572 VPUser &Operands, unsigned UF, 4573 ElementCount VF, bool IsPtrLoopInvariant, 4574 SmallBitVector &IsIndexLoopInvariant, 4575 VPTransformState &State) { 4576 // Construct a vector GEP by widening the operands of the scalar GEP as 4577 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4578 // results in a vector of pointers when at least one operand of the GEP 4579 // is vector-typed. Thus, to keep the representation compact, we only use 4580 // vector-typed operands for loop-varying values. 4581 4582 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4583 // If we are vectorizing, but the GEP has only loop-invariant operands, 4584 // the GEP we build (by only using vector-typed operands for 4585 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4586 // produce a vector of pointers, we need to either arbitrarily pick an 4587 // operand to broadcast, or broadcast a clone of the original GEP. 4588 // Here, we broadcast a clone of the original. 4589 // 4590 // TODO: If at some point we decide to scalarize instructions having 4591 // loop-invariant operands, this special case will no longer be 4592 // required. We would add the scalarization decision to 4593 // collectLoopScalars() and teach getVectorValue() to broadcast 4594 // the lane-zero scalar value. 4595 auto *Clone = Builder.Insert(GEP->clone()); 4596 for (unsigned Part = 0; Part < UF; ++Part) { 4597 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4598 State.set(VPDef, GEP, EntryPart, Part); 4599 addMetadata(EntryPart, GEP); 4600 } 4601 } else { 4602 // If the GEP has at least one loop-varying operand, we are sure to 4603 // produce a vector of pointers. But if we are only unrolling, we want 4604 // to produce a scalar GEP for each unroll part. 
Thus, the GEP we
4605     // produce with the code below will be scalar (if VF == 1) or vector
4606     // (otherwise). Note that for the unroll-only case, we still maintain
4607     // values in the vector mapping with initVector, as we do for other
4608     // instructions.
4609     for (unsigned Part = 0; Part < UF; ++Part) {
4610       // The pointer operand of the new GEP. If it's loop-invariant, we
4611       // won't broadcast it.
4612       auto *Ptr = IsPtrLoopInvariant
4613                       ? State.get(Operands.getOperand(0), VPIteration(0, 0))
4614                       : State.get(Operands.getOperand(0), Part);
4615
4616       // Collect all the indices for the new GEP. If any index is
4617       // loop-invariant, we won't broadcast it.
4618       SmallVector<Value *, 4> Indices;
4619       for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) {
4620         VPValue *Operand = Operands.getOperand(I);
4621         if (IsIndexLoopInvariant[I - 1])
4622           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
4623         else
4624           Indices.push_back(State.get(Operand, Part));
4625       }
4626
4627       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
4628       // but it should be a vector, otherwise.
4629       auto *NewGEP =
4630           GEP->isInBounds()
4631               ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
4632                                           Indices)
4633               : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
4634       assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
4635              "NewGEP is not a pointer vector");
4636       State.set(VPDef, GEP, NewGEP, Part);
4637       addMetadata(NewGEP, GEP);
4638     }
4639   }
4640 }
4641
4642 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4643                                               RecurrenceDescriptor *RdxDesc,
4644                                               Value *StartV, unsigned UF,
4645                                               ElementCount VF) {
4646   assert(!VF.isScalable() && "scalable vectors not yet supported.");
4647   PHINode *P = cast<PHINode>(PN);
4648   if (EnableVPlanNativePath) {
4649     // Currently we enter here in the VPlan-native path for non-induction
4650     // PHIs where all control flow is uniform. We simply widen these PHIs.
4651     // Create a vector phi with no operands - the vector phi operands will be
4652     // set at the end of vector code generation.
4653     Type *VecTy =
4654         (VF.isScalar()) ? PN->getType() : VectorType::get(PN->getType(), VF);
4655     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4656     VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
4657     OrigPHIsToFix.push_back(P);
4658
4659     return;
4660   }
4661
4662   assert(PN->getParent() == OrigLoop->getHeader() &&
4663          "Non-header phis should have been handled elsewhere");
4664
4665   // In order to support recurrences we need to be able to vectorize Phi nodes.
4666   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4667   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4668   // this value when we vectorize all of the instructions that use the PHI.
4669   if (RdxDesc || Legal->isFirstOrderRecurrence(P)) {
4670     Value *Iden = nullptr;
4671     bool ScalarPHI =
4672         (VF.isScalar()) || Cost->isInLoopReduction(cast<PHINode>(PN));
4673     Type *VecTy =
4674         ScalarPHI ? PN->getType() : VectorType::get(PN->getType(), VF);
4675
4676     if (RdxDesc) {
4677       assert(Legal->isReductionVariable(P) && StartV &&
4678              "RdxDesc should only be set for reduction variables; in that case "
4679              "a StartV is also required");
4680       RecurKind RK = RdxDesc->getRecurrenceKind();
4681       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK)) {
4682         // MinMax reductions have the start value as their identity.
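        // Illustrative (VF = 4 shorthand): an add reduction starts its vector
        // phi at <StartV, 0, 0, 0>, i.e. the identity 0 splatted with the
        // start value inserted into lane 0; min/max has no such neutral
        // constant, so the start value itself is splatted below:
        // <StartV, StartV, StartV, StartV>.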
4683 if (ScalarPHI) { 4684 Iden = StartV; 4685 } else { 4686 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4687 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4688 StartV = Iden = Builder.CreateVectorSplat(VF, StartV, "minmax.ident"); 4689 } 4690 } else { 4691 Constant *IdenC = RecurrenceDescriptor::getRecurrenceIdentity( 4692 RK, VecTy->getScalarType()); 4693 Iden = IdenC; 4694 4695 if (!ScalarPHI) { 4696 Iden = ConstantVector::getSplat(VF, IdenC); 4697 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 4698 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 4699 Constant *Zero = Builder.getInt32(0); 4700 StartV = Builder.CreateInsertElement(Iden, StartV, Zero); 4701 } 4702 } 4703 } 4704 4705 for (unsigned Part = 0; Part < UF; ++Part) { 4706 // This is phase one of vectorizing PHIs. 4707 Value *EntryPart = PHINode::Create( 4708 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4709 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 4710 if (StartV) { 4711 // Make sure to add the reduction start value only to the 4712 // first unroll part. 4713 Value *StartVal = (Part == 0) ? StartV : Iden; 4714 cast<PHINode>(EntryPart)->addIncoming(StartVal, LoopVectorPreHeader); 4715 } 4716 } 4717 return; 4718 } 4719 4720 assert(!Legal->isReductionVariable(P) && 4721 "reductions should be handled above"); 4722 4723 setDebugLocFromInst(Builder, P); 4724 4725 // This PHINode must be an induction variable. 4726 // Make sure that we know about it. 4727 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4728 4729 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4730 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4731 4732 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4733 // which can be found from the original scalar operations. 4734 switch (II.getKind()) { 4735 case InductionDescriptor::IK_NoInduction: 4736 llvm_unreachable("Unknown induction"); 4737 case InductionDescriptor::IK_IntInduction: 4738 case InductionDescriptor::IK_FpInduction: 4739 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4740 case InductionDescriptor::IK_PtrInduction: { 4741 // Handle the pointer induction variable case. 4742 assert(P->getType()->isPointerTy() && "Unexpected type."); 4743 4744 if (Cost->isScalarAfterVectorization(P, VF)) { 4745 // This is the normalized GEP that starts counting at zero. 4746 Value *PtrInd = 4747 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4748 // Determine the number of scalars we need to generate for each unroll 4749 // iteration. If the instruction is uniform, we only need to generate the 4750 // first lane. Otherwise, we generate all VF values. 4751 unsigned Lanes = 4752 Cost->isUniformAfterVectorization(P, VF) ? 
1 : VF.getKnownMinValue(); 4753 for (unsigned Part = 0; Part < UF; ++Part) { 4754 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4755 Constant *Idx = ConstantInt::get(PtrInd->getType(), 4756 Lane + Part * VF.getKnownMinValue()); 4757 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4758 Value *SclrGep = 4759 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4760 SclrGep->setName("next.gep"); 4761 VectorLoopValueMap.setScalarValue(P, VPIteration(Part, Lane), 4762 SclrGep); 4763 } 4764 } 4765 return; 4766 } 4767 assert(isa<SCEVConstant>(II.getStep()) && 4768 "Induction step not a SCEV constant!"); 4769 Type *PhiType = II.getStep()->getType(); 4770 4771 // Build a pointer phi 4772 Value *ScalarStartValue = II.getStartValue(); 4773 Type *ScStValueType = ScalarStartValue->getType(); 4774 PHINode *NewPointerPhi = 4775 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4776 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4777 4778 // A pointer induction, performed by using a gep 4779 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4780 Instruction *InductionLoc = LoopLatch->getTerminator(); 4781 const SCEV *ScalarStep = II.getStep(); 4782 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4783 Value *ScalarStepValue = 4784 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4785 Value *InductionGEP = GetElementPtrInst::Create( 4786 ScStValueType->getPointerElementType(), NewPointerPhi, 4787 Builder.CreateMul( 4788 ScalarStepValue, 4789 ConstantInt::get(PhiType, VF.getKnownMinValue() * UF)), 4790 "ptr.ind", InductionLoc); 4791 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4792 4793 // Create UF many actual address geps that use the pointer 4794 // phi as base and a vectorized version of the step value 4795 // (<step*0, ..., step*N>) as offset. 4796 for (unsigned Part = 0; Part < UF; ++Part) { 4797 SmallVector<Constant *, 8> Indices; 4798 // Create a vector of consecutive numbers from zero to VF. 4799 for (unsigned i = 0; i < VF.getKnownMinValue(); ++i) 4800 Indices.push_back( 4801 ConstantInt::get(PhiType, i + Part * VF.getKnownMinValue())); 4802 Constant *StartOffset = ConstantVector::get(Indices); 4803 4804 Value *GEP = Builder.CreateGEP( 4805 ScStValueType->getPointerElementType(), NewPointerPhi, 4806 Builder.CreateMul( 4807 StartOffset, 4808 Builder.CreateVectorSplat(VF.getKnownMinValue(), ScalarStepValue), 4809 "vector.gep")); 4810 VectorLoopValueMap.setVectorValue(P, Part, GEP); 4811 } 4812 } 4813 } 4814 } 4815 4816 /// A helper function for checking whether an integer division-related 4817 /// instruction may divide by zero (in which case it must be predicated if 4818 /// executed conditionally in the scalar code). 4819 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4820 /// Non-zero divisors that are non compile-time constants will not be 4821 /// converted into multiplication, so we will still end up scalarizing 4822 /// the division, but can do so w/o predication. 
4823 static bool mayDivideByZero(Instruction &I) { 4824 assert((I.getOpcode() == Instruction::UDiv || 4825 I.getOpcode() == Instruction::SDiv || 4826 I.getOpcode() == Instruction::URem || 4827 I.getOpcode() == Instruction::SRem) && 4828 "Unexpected instruction"); 4829 Value *Divisor = I.getOperand(1); 4830 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4831 return !CInt || CInt->isZero(); 4832 } 4833 4834 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4835 VPUser &User, 4836 VPTransformState &State) { 4837 switch (I.getOpcode()) { 4838 case Instruction::Call: 4839 case Instruction::Br: 4840 case Instruction::PHI: 4841 case Instruction::GetElementPtr: 4842 case Instruction::Select: 4843 llvm_unreachable("This instruction is handled by a different recipe."); 4844 case Instruction::UDiv: 4845 case Instruction::SDiv: 4846 case Instruction::SRem: 4847 case Instruction::URem: 4848 case Instruction::Add: 4849 case Instruction::FAdd: 4850 case Instruction::Sub: 4851 case Instruction::FSub: 4852 case Instruction::FNeg: 4853 case Instruction::Mul: 4854 case Instruction::FMul: 4855 case Instruction::FDiv: 4856 case Instruction::FRem: 4857 case Instruction::Shl: 4858 case Instruction::LShr: 4859 case Instruction::AShr: 4860 case Instruction::And: 4861 case Instruction::Or: 4862 case Instruction::Xor: { 4863 // Just widen unops and binops. 4864 setDebugLocFromInst(Builder, &I); 4865 4866 for (unsigned Part = 0; Part < UF; ++Part) { 4867 SmallVector<Value *, 2> Ops; 4868 for (VPValue *VPOp : User.operands()) 4869 Ops.push_back(State.get(VPOp, Part)); 4870 4871 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4872 4873 if (auto *VecOp = dyn_cast<Instruction>(V)) 4874 VecOp->copyIRFlags(&I); 4875 4876 // Use this vector value for all users of the original instruction. 4877 State.set(Def, &I, V, Part); 4878 addMetadata(V, &I); 4879 } 4880 4881 break; 4882 } 4883 case Instruction::ICmp: 4884 case Instruction::FCmp: { 4885 // Widen compares. Generate vector compares. 4886 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4887 auto *Cmp = cast<CmpInst>(&I); 4888 setDebugLocFromInst(Builder, Cmp); 4889 for (unsigned Part = 0; Part < UF; ++Part) { 4890 Value *A = State.get(User.getOperand(0), Part); 4891 Value *B = State.get(User.getOperand(1), Part); 4892 Value *C = nullptr; 4893 if (FCmp) { 4894 // Propagate fast math flags. 4895 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4896 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4897 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4898 } else { 4899 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4900 } 4901 State.set(Def, &I, C, Part); 4902 addMetadata(C, &I); 4903 } 4904 4905 break; 4906 } 4907 4908 case Instruction::ZExt: 4909 case Instruction::SExt: 4910 case Instruction::FPToUI: 4911 case Instruction::FPToSI: 4912 case Instruction::FPExt: 4913 case Instruction::PtrToInt: 4914 case Instruction::IntToPtr: 4915 case Instruction::SIToFP: 4916 case Instruction::UIToFP: 4917 case Instruction::Trunc: 4918 case Instruction::FPTrunc: 4919 case Instruction::BitCast: { 4920 auto *CI = cast<CastInst>(&I); 4921 setDebugLocFromInst(Builder, CI); 4922 4923 /// Vectorize casts. 4924 Type *DestTy = 4925 (VF.isScalar()) ? 
CI->getType() : VectorType::get(CI->getType(), VF);
4926
4927   for (unsigned Part = 0; Part < UF; ++Part) {
4928     Value *A = State.get(User.getOperand(0), Part);
4929     Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
4930     State.set(Def, &I, Cast, Part);
4931     addMetadata(Cast, &I);
4932   }
4933   break;
4934 }
4935   default:
4936     // This instruction is not vectorized by simple widening.
4937     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
4938     llvm_unreachable("Unhandled instruction!");
4939   } // end of switch.
4940 }
4941
4942 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4943                                                VPUser &ArgOperands,
4944                                                VPTransformState &State) {
4945   assert(!isa<DbgInfoIntrinsic>(I) &&
4946          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4947   setDebugLocFromInst(Builder, &I);
4948
4949   Module *M = I.getParent()->getParent()->getParent();
4950   auto *CI = cast<CallInst>(&I);
4951
4952   SmallVector<Type *, 4> Tys;
4953   for (Value *ArgOperand : CI->arg_operands())
4954     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4955
4956   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4957
4958   // The flag shows whether we use an Intrinsic or a plain Call for the
4959   // vectorized version of the instruction.
4960   // Is it beneficial to perform the intrinsic call compared to the lib call?
4961   bool NeedToScalarize = false;
4962   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4963   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4964   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4965   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4966          "Instruction should be scalarized elsewhere.");
4967   assert(IntrinsicCost.isValid() && CallCost.isValid() &&
4968          "Cannot have invalid costs while widening");
4969
4970   for (unsigned Part = 0; Part < UF; ++Part) {
4971     SmallVector<Value *, 4> Args;
4972     for (auto &I : enumerate(ArgOperands.operands())) {
4973       // Some intrinsics have a scalar argument - don't replace it with a
4974       // vector.
4975       Value *Arg;
4976       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4977         Arg = State.get(I.value(), Part);
4978       else
4979         Arg = State.get(I.value(), VPIteration(0, 0));
4980       Args.push_back(Arg);
4981     }
4982
4983     Function *VectorF;
4984     if (UseVectorIntrinsic) {
4985       // Use vector version of the intrinsic.
4986       Type *TysForDecl[] = {CI->getType()};
4987       if (VF.isVector()) {
4988         assert(!VF.isScalable() && "VF is assumed to be non scalable.");
4989         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4990       }
4991       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4992       assert(VectorF && "Can't retrieve vector intrinsic.");
4993     } else {
4994       // Use vector version of the function call.
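      // Illustrative: for a call to sinf with VF = 4 and no mask, the
      // VFDatabase lookup below may resolve, via the TLI mappings, to a
      // vector routine such as _ZGVbN4v_sinf (a Vector Function ABI mangled
      // name); the exact symbol depends on the vector library in use.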
4995       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4996 #ifndef NDEBUG
4997       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4998              "Can't create vector function.");
4999 #endif
5000       VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
5001     }
5002     SmallVector<OperandBundleDef, 1> OpBundles;
5003     CI->getOperandBundlesAsDefs(OpBundles);
5004     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
5005
5006     if (isa<FPMathOperator>(V))
5007       V->copyFastMathFlags(CI);
5008
5009     State.set(Def, &I, V, Part);
5010     addMetadata(V, &I);
5011   }
5012 }
5013
5014 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef,
5015                                                  VPUser &Operands,
5016                                                  bool InvariantCond,
5017                                                  VPTransformState &State) {
5018   setDebugLocFromInst(Builder, &I);
5019
5020   // The condition can be loop invariant but still defined inside the
5021   // loop. This means that we can't just use the original 'cond' value.
5022   // We have to take the 'vectorized' value and pick the first lane.
5023   // InstCombine will make this a no-op.
5024   auto *InvarCond = InvariantCond
5025                         ? State.get(Operands.getOperand(0), VPIteration(0, 0))
5026                         : nullptr;
5027
5028   for (unsigned Part = 0; Part < UF; ++Part) {
5029     Value *Cond =
5030         InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part);
5031     Value *Op0 = State.get(Operands.getOperand(1), Part);
5032     Value *Op1 = State.get(Operands.getOperand(2), Part);
5033     Value *Sel = Builder.CreateSelect(Cond, Op0, Op1);
5034     State.set(VPDef, &I, Sel, Part);
5035     addMetadata(Sel, &I);
5036   }
5037 }
5038
5039 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
5040   // We should not collect Scalars more than once per VF. Right now, this
5041   // function is called from collectUniformsAndScalars(), which already does
5042   // this check. Collecting Scalars for VF=1 does not make any sense.
5043   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
5044          "This function should not be visited twice for the same VF");
5045
5046   SmallSetVector<Instruction *, 8> Worklist;
5047
5048   // These sets are used to seed the analysis with pointers used by memory
5049   // accesses that will remain scalar.
5050   SmallSetVector<Instruction *, 8> ScalarPtrs;
5051   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
5052   auto *Latch = TheLoop->getLoopLatch();
5053
5054   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
5055   // The pointer operands of loads and stores will be scalar as long as the
5056   // memory access is not a gather or scatter operation. The value operand of a
5057   // store will remain scalar if the store is scalarized.
5058   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5059     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5060     assert(WideningDecision != CM_Unknown &&
5061            "Widening decision should be ready at this moment");
5062     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5063       if (Ptr == Store->getValueOperand())
5064         return WideningDecision == CM_Scalarize;
5065     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5066            "Ptr is neither a value nor a pointer operand");
5067     return WideningDecision != CM_GatherScatter;
5068   };
5069
5070   // A helper that returns true if the given value is a bitcast or
5071   // getelementptr instruction contained in the loop.
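  // For example, a getelementptr computed inside the loop from the induction
  // variable qualifies, while a bitcast or GEP whose operands are all defined
  // outside the loop is loop-invariant and is filtered out below.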
5072   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5073     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5074             isa<GetElementPtrInst>(V)) &&
5075            !TheLoop->isLoopInvariant(V);
5076   };
5077
5078   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5079     if (!isa<PHINode>(Ptr) ||
5080         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5081       return false;
5082     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5083     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5084       return false;
5085     return isScalarUse(MemAccess, Ptr);
5086   };
5087
5088   // A helper that evaluates a memory access's use of a pointer. If the
5089   // pointer is actually the pointer induction of a loop, it is inserted
5090   // into Worklist. If the use will be a scalar use, and the
5091   // pointer is only used by memory accesses, we place the pointer in
5092   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5093   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5094     if (isScalarPtrInduction(MemAccess, Ptr)) {
5095       Worklist.insert(cast<Instruction>(Ptr));
5096       Instruction *Update = cast<Instruction>(
5097           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5098       Worklist.insert(Update);
5099       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5100                         << "\n");
5101       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update
5102                         << "\n");
5103       return;
5104     }
5105     // We only care about bitcast and getelementptr instructions contained in
5106     // the loop.
5107     if (!isLoopVaryingBitCastOrGEP(Ptr))
5108       return;
5109
5110     // If the pointer has already been identified as scalar (e.g., if it was
5111     // also identified as uniform), there's nothing to do.
5112     auto *I = cast<Instruction>(Ptr);
5113     if (Worklist.count(I))
5114       return;
5115
5116     // If the use of the pointer will be a scalar use, and all users of the
5117     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5118     // place the pointer in PossibleNonScalarPtrs.
5119     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5120           return isa<LoadInst>(U) || isa<StoreInst>(U);
5121         }))
5122       ScalarPtrs.insert(I);
5123     else
5124       PossibleNonScalarPtrs.insert(I);
5125   };
5126
5127   // We seed the scalars analysis with two classes of instructions: (1)
5128   // instructions marked uniform-after-vectorization and (2) bitcast,
5129   // getelementptr and (pointer) phi instructions used by memory accesses
5130   // requiring a scalar use.
5131   //
5132   // (1) Add to the worklist all instructions that have been identified as
5133   // uniform-after-vectorization.
5134   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5135
5136   // (2) Add to the worklist all bitcast and getelementptr instructions used by
5137   // memory accesses requiring a scalar use. The pointer operands of loads and
5138   // stores will be scalar as long as the memory access is not a gather or
5139   // scatter operation. The value operand of a store will remain scalar if the
5140   // store is scalarized.
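  // Illustrative: in "for (i) a[i] = b[i] + 1;" the two GEPs feeding the
  // consecutive load and store are scalar uses (one address per unrolled
  // part), whereas the address computation of a gather like "... = a[b[i]]"
  // stays vector-typed and is not seeded here.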
5141 for (auto *BB : TheLoop->blocks()) 5142 for (auto &I : *BB) { 5143 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5144 evaluatePtrUse(Load, Load->getPointerOperand()); 5145 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5146 evaluatePtrUse(Store, Store->getPointerOperand()); 5147 evaluatePtrUse(Store, Store->getValueOperand()); 5148 } 5149 } 5150 for (auto *I : ScalarPtrs) 5151 if (!PossibleNonScalarPtrs.count(I)) { 5152 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5153 Worklist.insert(I); 5154 } 5155 5156 // Insert the forced scalars. 5157 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5158 // induction variable when the PHI user is scalarized. 5159 auto ForcedScalar = ForcedScalars.find(VF); 5160 if (ForcedScalar != ForcedScalars.end()) 5161 for (auto *I : ForcedScalar->second) 5162 Worklist.insert(I); 5163 5164 // Expand the worklist by looking through any bitcasts and getelementptr 5165 // instructions we've already identified as scalar. This is similar to the 5166 // expansion step in collectLoopUniforms(); however, here we're only 5167 // expanding to include additional bitcasts and getelementptr instructions. 5168 unsigned Idx = 0; 5169 while (Idx != Worklist.size()) { 5170 Instruction *Dst = Worklist[Idx++]; 5171 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5172 continue; 5173 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5174 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5175 auto *J = cast<Instruction>(U); 5176 return !TheLoop->contains(J) || Worklist.count(J) || 5177 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5178 isScalarUse(J, Src)); 5179 })) { 5180 Worklist.insert(Src); 5181 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5182 } 5183 } 5184 5185 // An induction variable will remain scalar if all users of the induction 5186 // variable and induction variable update remain scalar. 5187 for (auto &Induction : Legal->getInductionVars()) { 5188 auto *Ind = Induction.first; 5189 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5190 5191 // If tail-folding is applied, the primary induction variable will be used 5192 // to feed a vector compare. 5193 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5194 continue; 5195 5196 // Determine if all users of the induction variable are scalar after 5197 // vectorization. 5198 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5199 auto *I = cast<Instruction>(U); 5200 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5201 }); 5202 if (!ScalarInd) 5203 continue; 5204 5205 // Determine if all users of the induction variable update instruction are 5206 // scalar after vectorization. 5207 auto ScalarIndUpdate = 5208 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5209 auto *I = cast<Instruction>(U); 5210 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5211 }); 5212 if (!ScalarIndUpdate) 5213 continue; 5214 5215 // The induction variable and its update instruction will remain scalar. 
5216     Worklist.insert(Ind);
5217     Worklist.insert(IndUpdate);
5218     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5219     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5220                       << "\n");
5221   }
5222
5223   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5224 }
5225
5226 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
5227                                                          ElementCount VF) {
5228   if (!blockNeedsPredication(I->getParent()))
5229     return false;
5230   switch (I->getOpcode()) {
5231   default:
5232     break;
5233   case Instruction::Load:
5234   case Instruction::Store: {
5235     if (!Legal->isMaskRequired(I))
5236       return false;
5237     auto *Ptr = getLoadStorePointerOperand(I);
5238     auto *Ty = getMemInstValueType(I);
5239     // We have already decided how to vectorize this instruction, get that
5240     // result.
5241     if (VF.isVector()) {
5242       InstWidening WideningDecision = getWideningDecision(I, VF);
5243       assert(WideningDecision != CM_Unknown &&
5244              "Widening decision should be ready at this moment");
5245       return WideningDecision == CM_Scalarize;
5246     }
5247     const Align Alignment = getLoadStoreAlignment(I);
5248     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5249                                 isLegalMaskedGather(Ty, Alignment))
5250                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5251                                 isLegalMaskedScatter(Ty, Alignment));
5252   }
5253   case Instruction::UDiv:
5254   case Instruction::SDiv:
5255   case Instruction::SRem:
5256   case Instruction::URem:
5257     return mayDivideByZero(*I);
5258   }
5259   return false;
5260 }
5261
5262 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5263     Instruction *I, ElementCount VF) {
5264   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5265   assert(getWideningDecision(I, VF) == CM_Unknown &&
5266          "Decision should not be set yet.");
5267   auto *Group = getInterleavedAccessGroup(I);
5268   assert(Group && "Must have a group.");
5269
5270   // If the instruction's allocated size doesn't equal its type size, it
5271   // requires padding and will be scalarized.
5272   auto &DL = I->getModule()->getDataLayout();
5273   auto *ScalarTy = getMemInstValueType(I);
5274   if (hasIrregularType(ScalarTy, DL, VF))
5275     return false;
5276
5277   // Check if masking is required.
5278   // A Group may need masking for one of two reasons: it resides in a block that
5279   // needs predication, or it was decided to use masking to deal with gaps.
5280   bool PredicatedAccessRequiresMasking =
5281       Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
5282   bool AccessWithGapsRequiresMasking =
5283       Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
5284   if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
5285     return true;
5286
5287   // If masked interleaving is required, we expect that the user/target had
5288   // enabled it, because otherwise it either wouldn't have been created or
5289   // it should have been invalidated by the CostModel.
5290   assert(useMaskedInterleavedAccesses(TTI) &&
5291          "Masked interleave-groups for predicated accesses are not enabled.");
5292
5293   auto *Ty = getMemInstValueType(I);
5294   const Align Alignment = getLoadStoreAlignment(I);
5295   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5296                           : TTI.isLegalMaskedStore(Ty, Alignment);
5297 }
5298
5299 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5300     Instruction *I, ElementCount VF) {
5301   // Get and ensure we have a valid memory instruction.
5302   LoadInst *LI = dyn_cast<LoadInst>(I);
5303   StoreInst *SI = dyn_cast<StoreInst>(I);
5304   assert((LI || SI) && "Invalid memory instruction");
5305
5306   auto *Ptr = getLoadStorePointerOperand(I);
5307
5308   // First of all, in order to be widened the pointer must be consecutive.
5309   if (!Legal->isConsecutivePtr(Ptr))
5310     return false;
5311
5312   // If the instruction is a store located in a predicated block, it will be
5313   // scalarized.
5314   if (isScalarWithPredication(I))
5315     return false;
5316
5317   // If the instruction's allocated size doesn't equal its type size, it
5318   // requires padding and will be scalarized.
5319   auto &DL = I->getModule()->getDataLayout();
5320   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5321   if (hasIrregularType(ScalarTy, DL, VF))
5322     return false;
5323
5324   return true;
5325 }
5326
5327 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5328   // We should not collect Uniforms more than once per VF. Right now,
5329   // this function is called from collectUniformsAndScalars(), which
5330   // already does this check. Collecting Uniforms for VF=1 does not make any
5331   // sense.
5332
5333   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5334          "This function should not be visited twice for the same VF");
5335
5336   // Initialize the list of Uniforms. Even if we find no uniform values, the
5337   // VF is then marked as analyzed, and Uniforms.count(VF) will return 1.
5338   Uniforms[VF].clear();
5339
5340   // We now know that the loop is vectorizable!
5341   // Collect instructions inside the loop that will remain uniform after
5342   // vectorization.
5343
5344   // Global values, params and instructions outside of the current loop are out
5345   // of scope.
5346   auto isOutOfScope = [&](Value *V) -> bool {
5347     Instruction *I = dyn_cast<Instruction>(V);
5348     return (!I || !TheLoop->contains(I));
5349   };
5350
5351   SetVector<Instruction *> Worklist;
5352   BasicBlock *Latch = TheLoop->getLoopLatch();
5353
5354   // Instructions that are scalar with predication must not be considered
5355   // uniform after vectorization, because that would create an erroneous
5356   // replicating region where only a single instance out of VF should be formed.
5357   // TODO: optimize such seldom cases if found important, see PR40816.
5358   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5359     if (isOutOfScope(I)) {
5360       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5361                         << *I << "\n");
5362       return;
5363     }
5364     if (isScalarWithPredication(I, VF)) {
5365       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5366                         << *I << "\n");
5367       return;
5368     }
5369     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5370     Worklist.insert(I);
5371   };
5372
5373   // Start with the conditional branch. If the branch condition is an
5374   // instruction contained in the loop that is only used by the branch, it is
5375   // uniform.
5376   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5377   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5378     addToWorklistIfAllowed(Cmp);
5379
5380   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5381     InstWidening WideningDecision = getWideningDecision(I, VF);
5382     assert(WideningDecision != CM_Unknown &&
5383            "Widening decision should be ready at this moment");
5384
5385     // A uniform memory op is itself uniform. We exclude uniform stores
5386     // here as they demand the last lane, not the first one.
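    // Illustrative: a load from a loop-invariant address, "x = *p", yields
    // the same value in every lane, so it is uniform; a store "*p = x"
    // executed every iteration must reflect the final iteration, i.e. the
    // last lane, which is why uniform stores are excluded here.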
    if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
      assert(WideningDecision == CM_Scalarize);
      return true;
    }

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // Holds a list of values which are known to have at least one uniform use.
  // Note that there may be other uses which aren't uniform. A "uniform use"
  // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
  SmallPtrSet<Value *, 8> HasUniformUse;

  // Scan the loop for instructions which are either a) known to have only
  // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // A uniform memory op is itself uniform. We exclude uniform stores
      // here as they demand the last lane, not the first one.
      if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
        addToWorklistIfAllowed(&I);

      if (isUniformDecision(&I, VF)) {
        assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
        HasUniformUse.insert(Ptr);
      }
    }

  // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  // demanding) users. Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
      continue;
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
        llvm::all_of(I->users(), [&](User *U) -> bool {
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
        });
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);
  }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First order recurrence PHIs should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

Optional<ElementCount>
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do so, since it's still likely to be
    // dynamically uniform if the target can skip.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return None;
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return None;

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to a vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF);
    }
    return None;
  }

  // Now try the tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  ElementCount MaxVF = computeFeasibleMaxVF(TC, UserVF);
  assert(!MaxVF.isScalable() &&
         "Scalable vectors do not yet support tail folding");
  assert((UserVF.isNonZero() || isPowerOf2_32(MaxVF.getFixedValue())) &&
         "MaxVF must be a power of 2");
  unsigned MaxVFtimesIC =
      UserIC ? MaxVF.getFixedValue() * UserIC : MaxVF.getFixedValue();
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we choose.
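  // Worked example (hypothetical values): with a constant trip count of 64,
  // MaxVF = 8 and no user-specified interleave count, (BTC + 1) % 8 == 0, so
  // the remainder check below accepts MaxVF with no tail folding and no
  // scalar epilogue required.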
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
  const SCEV *Rem = SE->getURemExpr(
      SE->applyLoopGuards(ExitCount, TheLoop),
      SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
  if (Rem->isZero()) {
    // Accept MaxVF if we do not have a tail.
    LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
    return MaxVF;
  }

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxVF;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to a vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxVF;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return None;
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return None;
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return None;
}

ElementCount
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
                                                 ElementCount UserVF) {
  bool IgnoreScalableUserVF = UserVF.isScalable() &&
                              !TTI.supportsScalableVectors() &&
                              !ForceTargetSupportsScalableVectors;
  if (IgnoreScalableUserVF) {
    LLVM_DEBUG(
        dbgs() << "LV: Ignoring VF=" << UserVF
               << " because target does not support scalable vectors.\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreScalableUserVF",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "Ignoring VF=" << ore::NV("UserVF", UserVF)
             << " because target does not support scalable vectors.";
    });
  }

  // Beyond this point two scenarios are handled. If UserVF isn't specified
  // then a suitable VF is chosen. If UserVF is specified and there are
  // dependencies, check if it's legal. However, if a UserVF is specified and
  // there are no dependencies, then there's nothing to do.
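  // For instance (hypothetical hint): a user-specified VF of 4 on a loop that
  // is safe for any vector width is returned unchanged below; if a memory
  // dependence only permits a narrower width, the clamping logic further down
  // reduces it to the maximum safe VF instead.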
  if (UserVF.isNonZero() && !IgnoreScalableUserVF &&
      Legal->isSafeForAnyVectorWidth())
    return UserVF;

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeVectorWidthInBits = Legal->getMaxSafeVectorWidthInBits();

  // If the user vectorization factor is legally unsafe, clamp it to a safe
  // value. Otherwise, return as is.
  if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
    unsigned MaxSafeElements =
        PowerOf2Floor(MaxSafeVectorWidthInBits / WidestType);
    ElementCount MaxSafeVF = ElementCount::getFixed(MaxSafeElements);

    if (UserVF.isScalable()) {
      Optional<unsigned> MaxVScale = TTI.getMaxVScale();

      // Scale VF by vscale before checking if it's safe.
      MaxSafeVF = ElementCount::getScalable(
          MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);

      if (MaxSafeVF.isZero()) {
        // The dependence distance is too small to use scalable vectors,
        // fall back on fixed.
        LLVM_DEBUG(
            dbgs()
            << "LV: Max legal vector width too small, scalable vectorization "
               "unfeasible. Using fixed-width vectorization instead.\n");
        ORE->emit([&]() {
          return OptimizationRemarkAnalysis(DEBUG_TYPE, "ScalableVFUnfeasible",
                                            TheLoop->getStartLoc(),
                                            TheLoop->getHeader())
                 << "Max legal vector width too small, scalable vectorization "
                 << "unfeasible. Using fixed-width vectorization instead.";
        });
        return computeFeasibleMaxVF(
            ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
      }
    }

    LLVM_DEBUG(dbgs() << "LV: The max safe VF is: " << MaxSafeVF << ".\n");

    if (ElementCount::isKnownLE(UserVF, MaxSafeVF))
      return UserVF;

    LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                      << " is unsafe, clamping to max safe VF=" << MaxSafeVF
                      << ".\n");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
             << "User-specified vectorization factor "
             << ore::NV("UserVectorizationFactor", UserVF)
             << " is unsafe, clamping to maximum safe vectorization factor "
             << ore::NV("VectorizationFactor", MaxSafeVF);
    });
    return MaxSafeVF;
  }

  WidestRegister = std::min(WidestRegister, MaxSafeVectorWidthInBits);

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
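  // Worked example (hypothetical target): with a 256-bit widest register and
  // a widest element type of 32 bits, PowerOf2Floor(256 / 32) yields a
  // MaxVectorSize of 8 lanes; if a dependence distance clamps WidestRegister
  // to 96 bits, 96 / 32 = 3 rounds down to a MaxVectorSize of 2.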
  unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType);

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << WidestRegister << " bits.\n");

  assert(MaxVectorSize <= WidestRegister &&
         "Did not expect to pack so many elements"
         " into one vector!");
  if (MaxVectorSize == 0) {
    LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
    return ElementCount::getFixed(MaxVectorSize);
  } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
             isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to be the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below.
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                      << ConstTripCount << "\n");
    MaxVectorSize = ConstTripCount;
    return ElementCount::getFixed(MaxVectorSize);
  }

  unsigned MaxVF = MaxVectorSize;
  if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorSize).
    SmallVector<ElementCount, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(ElementCount::getFixed(VS));

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i].getKnownMinValue();
        break;
      }
    }
    if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
      if (MaxVF < MinVF) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return ElementCount::getFixed(MaxVF);
}

VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(ElementCount MaxVF) {
  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  assert(!MaxVF.isScalable() && "scalable vectors not yet supported");

  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");

  unsigned Width = 1;
  const float ScalarCost = *ExpectedCost.getValue();
  float Cost = ScalarCost;

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && MaxVF.isVector()) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    Cost = std::numeric_limits<float>::max();
  }

  for (unsigned i = 2; i <= MaxVF.getFixedValue(); i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loops by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(ElementCount::getFixed(i));
    assert(C.first.isValid() && "Unexpected invalid cost for vector loop");
    float VectorCost = *C.first.getValue() / (float)i;
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVFs list.
    if (VectorCost < ScalarCost) {
      ProfitableVFs.push_back(VectorizationFactor(
          {ElementCount::getFixed(i), (unsigned)VectorCost}));
    }

    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    Width = 1;
    Cost = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  VectorizationFactor Factor = {ElementCount::getFixed(Width),
                                (unsigned)(Width * Cost)};
  return Factor;
}

bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
    const Loop &L, ElementCount VF) const {
  // Cross iteration phis such as reductions need special handling and are
  // currently unsupported.
  if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
        return Legal->isFirstOrderRecurrence(&Phi) ||
               Legal->isReductionVariable(&Phi);
      }))
    return false;

  // Phis with uses outside of the loop require special handling and are
  // currently unsupported.
  for (auto &Entry : Legal->getInductionVars()) {
    // Look for uses of the value of the induction at the last iteration.
    Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
    for (User *U : PostInc->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
    // Look for uses of penultimate value of the induction.
    for (User *U : Entry.first->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
  }

  // Induction variables that are widened require special handling that is
  // currently not supported.
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  return true;
}

bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // FIXME: This can be fixed for scalable vectors later, because at this stage
  // the LoopVectorizer will only consider vectorizing a loop with scalable
  // vectors when the loop has a hint to enable vectorization for a given VF.
  if (MainLoopVF.isScalable()) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not "
                         "yet supported.\n");
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    if (LVP.hasPlanWithVFs(
            {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)}))
      return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }

  if (!isEpilogueVectorizationProfitable(MainLoopVF))
    return Result;

  for (auto &NextVF : ProfitableVFs)
    if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) &&
        (Result.Width.getFixedValue() == 1 || NextVF.Cost < Result.Cost) &&
        LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width}))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width.getFixedValue() << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
        if (PreferInLoopReductions ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store will
      //        be vectorized. We only know this for certain after a VF has
      //        been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // We used the distance for the interleave count.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross iteration dependences for reductions.
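  // (Illustrative: a loop whose estimated trip count falls below the
  // threshold, say 20 iterations, is normally left with IC = 1; the exception
  // below is a scalar reduction with InterleaveSmallLoopScalarReduction set,
  // where interleaving still helps break the cross-iteration dependence.)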
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want power of two interleave count to simplify any
  // addressing operations or alignment considerations.
  // We also want power of two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when OptForSize, in which case IC is set to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor(
        (TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If trip count is known or estimated compile time constant, limit the
  // interleave count to be less than the trip count divided by VF, provided it
  // is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second vector
  // iterations is enabled. However, for larger loops, there is likely to be a
  // similar benefit as for fixed-width vectors. For now, we choose to leave
  // the InterleaveCount as if vscale is '1', although if some information about
  // the vector is known (e.g. min vector size), we can make a better decision.
  if (BestKnownTC) {
    MaxInterleaveCount =
        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
    // Make sure MaxInterleaveCount is greater than 0.
    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  }

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else
    // Make sure IC is greater than 0.
    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0) {
    assert(expectedCost(VF).first.isValid() && "Expected a valid cost");
    LoopCost = *expectedCost(VF).first.getValue();
  }

  assert(LoopCost && "Non-zero loop cost expected");

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
                    << "LV: IC is " << IC << '\n'
                    << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =
      TTI.enableAggressiveInterleaving(HasReductions);
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default to 2, so the
    // critical path only gets increased by one reduction operation.
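    // (Illustrative: with MaxNestedScalarReductionIC at its default of 2, an
    // inner loop carrying a scalar reduction inside an outer loop has SmallIC,
    // StoresIC and LoadsIC capped at 2 below, so the reduction's critical path
    // grows by at most one extra operation.)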
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressive as the normal IC
      // to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi map that holds the list of
  // intervals that *end* at a specific location. This multi map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set. We also search for instructions that
  // are defined outside the loop, but are used inside the loop. We need this
  // number separately from the max-interval usage number because when we
  // unroll, loop-invariant values do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0U;
    return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          if (RegUsage.find(ClassID) == RegUsage.end())
            RegUsage[ClassID] = 1;
          else
            RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = 1;
            else
              RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
            else
              RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
        else
          MaxUsages[j][pair.first] = pair.second;
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving "masked load/store" check from legality to cost model.
  // Masked Load/Gather emulation was previously never allowed.
  // A limited number of Masked Store/Scatter emulations were allowed.
  assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
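  // (Illustrative: in a predicated body such as
  //    if (c[i] != 0) a[i] = b[i] / c[i];
  // the divide is scalar-with-predication, and the discount computation
  // invoked below decides whether scalarizing its single-use chain is cheaper
  // than if-converting and widening it.)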
6513 if (VF.isScalar() || VF.isZero() || 6514 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6515 return; 6516 6517 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6518 // not profitable to scalarize any instructions, the presence of VF in the 6519 // map will indicate that we've analyzed it already. 6520 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6521 6522 // Find all the instructions that are scalar with predication in the loop and 6523 // determine if it would be better to not if-convert the blocks they are in. 6524 // If so, we also record the instructions to scalarize. 6525 for (BasicBlock *BB : TheLoop->blocks()) { 6526 if (!blockNeedsPredication(BB)) 6527 continue; 6528 for (Instruction &I : *BB) 6529 if (isScalarWithPredication(&I)) { 6530 ScalarCostsTy ScalarCosts; 6531 // Do not apply discount logic if hacked cost is needed 6532 // for emulated masked memrefs. 6533 if (!useEmulatedMaskMemRefHack(&I) && 6534 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6535 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6536 // Remember that BB will remain after vectorization. 6537 PredicatedBBsAfterVectorization.insert(BB); 6538 } 6539 } 6540 } 6541 6542 int LoopVectorizationCostModel::computePredInstDiscount( 6543 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6544 assert(!isUniformAfterVectorization(PredInst, VF) && 6545 "Instruction marked uniform-after-vectorization will be predicated"); 6546 6547 // Initialize the discount to zero, meaning that the scalar version and the 6548 // vector version cost the same. 6549 InstructionCost Discount = 0; 6550 6551 // Holds instructions to analyze. The instructions we visit are mapped in 6552 // ScalarCosts. Those instructions are the ones that would be scalarized if 6553 // we find that the scalar version costs less. 6554 SmallVector<Instruction *, 8> Worklist; 6555 6556 // Returns true if the given instruction can be scalarized. 6557 auto canBeScalarized = [&](Instruction *I) -> bool { 6558 // We only attempt to scalarize instructions forming a single-use chain 6559 // from the original predicated block that would otherwise be vectorized. 6560 // Although not strictly necessary, we give up on instructions we know will 6561 // already be scalar to avoid traversing chains that are unlikely to be 6562 // beneficial. 6563 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6564 isScalarAfterVectorization(I, VF)) 6565 return false; 6566 6567 // If the instruction is scalar with predication, it will be analyzed 6568 // separately. We ignore it within the context of PredInst. 6569 if (isScalarWithPredication(I)) 6570 return false; 6571 6572 // If any of the instruction's operands are uniform after vectorization, 6573 // the instruction cannot be scalarized. This prevents, for example, a 6574 // masked load from being scalarized. 6575 // 6576 // We assume we will only emit a value for lane zero of an instruction 6577 // marked uniform after vectorization, rather than VF identical values. 6578 // Thus, if we scalarize an instruction that uses a uniform, we would 6579 // create uses of values corresponding to the lanes we aren't emitting code 6580 // for. This behavior can be changed by allowing getScalarValue to clone 6581 // the lane zero values for uniforms rather than asserting. 
6582 for (Use &U : I->operands()) 6583 if (auto *J = dyn_cast<Instruction>(U.get())) 6584 if (isUniformAfterVectorization(J, VF)) 6585 return false; 6586 6587 // Otherwise, we can scalarize the instruction. 6588 return true; 6589 }; 6590 6591 // Compute the expected cost discount from scalarizing the entire expression 6592 // feeding the predicated instruction. We currently only consider expressions 6593 // that are single-use instruction chains. 6594 Worklist.push_back(PredInst); 6595 while (!Worklist.empty()) { 6596 Instruction *I = Worklist.pop_back_val(); 6597 6598 // If we've already analyzed the instruction, there's nothing to do. 6599 if (ScalarCosts.find(I) != ScalarCosts.end()) 6600 continue; 6601 6602 // Compute the cost of the vector instruction. Note that this cost already 6603 // includes the scalarization overhead of the predicated instruction. 6604 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6605 6606 // Compute the cost of the scalarized instruction. This cost is the cost of 6607 // the instruction as if it wasn't if-converted and instead remained in the 6608 // predicated block. We will scale this cost by block probability after 6609 // computing the scalarization overhead. 6610 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6611 InstructionCost ScalarCost = 6612 VF.getKnownMinValue() * 6613 getInstructionCost(I, ElementCount::getFixed(1)).first; 6614 6615 // Compute the scalarization overhead of needed insertelement instructions 6616 // and phi nodes. 6617 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6618 ScalarCost += TTI.getScalarizationOverhead( 6619 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6620 APInt::getAllOnesValue(VF.getKnownMinValue()), true, false); 6621 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6622 ScalarCost += 6623 VF.getKnownMinValue() * 6624 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6625 } 6626 6627 // Compute the scalarization overhead of needed extractelement 6628 // instructions. For each of the instruction's operands, if the operand can 6629 // be scalarized, add it to the worklist; otherwise, account for the 6630 // overhead. 6631 for (Use &U : I->operands()) 6632 if (auto *J = dyn_cast<Instruction>(U.get())) { 6633 assert(VectorType::isValidElementType(J->getType()) && 6634 "Instruction has non-scalar type"); 6635 if (canBeScalarized(J)) 6636 Worklist.push_back(J); 6637 else if (needsExtract(J, VF)) { 6638 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6639 ScalarCost += TTI.getScalarizationOverhead( 6640 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6641 APInt::getAllOnesValue(VF.getKnownMinValue()), false, true); 6642 } 6643 } 6644 6645 // Scale the total scalar cost by block probability. 6646 ScalarCost /= getReciprocalPredBlockProb(); 6647 6648 // Compute the discount. A non-negative discount means the vector version 6649 // of the instruction costs more, and scalarizing would be beneficial. 6650 Discount += VectorCost - ScalarCost; 6651 ScalarCosts[I] = ScalarCost; 6652 } 6653 6654 return *Discount.getValue(); 6655 } 6656 6657 LoopVectorizationCostModel::VectorizationCostTy 6658 LoopVectorizationCostModel::expectedCost(ElementCount VF) { 6659 VectorizationCostTy Cost; 6660 6661 // For each block. 6662 for (BasicBlock *BB : TheLoop->blocks()) { 6663 VectorizationCostTy BlockCost; 6664 6665 // For each instruction in the old loop. 6666 for (Instruction &I : BB->instructionsWithoutDebug()) { 6667 // Skip ignored values. 
6668 if (ValuesToIgnore.count(&I) || 6669 (VF.isVector() && VecValuesToIgnore.count(&I))) 6670 continue; 6671 6672 VectorizationCostTy C = getInstructionCost(&I, VF); 6673 6674 // Check if we should override the cost. 6675 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6676 C.first = InstructionCost(ForceTargetInstructionCost); 6677 6678 BlockCost.first += C.first; 6679 BlockCost.second |= C.second; 6680 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6681 << " for VF " << VF << " For instruction: " << I 6682 << '\n'); 6683 } 6684 6685 // If we are vectorizing a predicated block, it will have been 6686 // if-converted. This means that the block's instructions (aside from 6687 // stores and instructions that may divide by zero) will now be 6688 // unconditionally executed. For the scalar case, we may not always execute 6689 // the predicated block, if it is an if-else block. Thus, scale the block's 6690 // cost by the probability of executing it. blockNeedsPredication from 6691 // Legal is used so as to not include all blocks in tail folded loops. 6692 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6693 BlockCost.first /= getReciprocalPredBlockProb(); 6694 6695 Cost.first += BlockCost.first; 6696 Cost.second |= BlockCost.second; 6697 } 6698 6699 return Cost; 6700 } 6701 6702 /// Gets Address Access SCEV after verifying that the access pattern 6703 /// is loop invariant except the induction variable dependence. 6704 /// 6705 /// This SCEV can be sent to the Target in order to estimate the address 6706 /// calculation cost. 6707 static const SCEV *getAddressAccessSCEV( 6708 Value *Ptr, 6709 LoopVectorizationLegality *Legal, 6710 PredicatedScalarEvolution &PSE, 6711 const Loop *TheLoop) { 6712 6713 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6714 if (!Gep) 6715 return nullptr; 6716 6717 // We are looking for a gep with all loop invariant indices except for one 6718 // which should be an induction variable. 6719 auto SE = PSE.getSE(); 6720 unsigned NumOperands = Gep->getNumOperands(); 6721 for (unsigned i = 1; i < NumOperands; ++i) { 6722 Value *Opd = Gep->getOperand(i); 6723 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6724 !Legal->isInductionVariable(Opd)) 6725 return nullptr; 6726 } 6727 6728 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6729 return PSE.getSCEV(Ptr); 6730 } 6731 6732 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6733 return Legal->hasStride(I->getOperand(0)) || 6734 Legal->hasStride(I->getOperand(1)); 6735 } 6736 6737 InstructionCost 6738 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6739 ElementCount VF) { 6740 assert(VF.isVector() && 6741 "Scalarization cost of instruction implies vectorization."); 6742 assert(!VF.isScalable() && "scalable vectors not yet supported."); 6743 Type *ValTy = getMemInstValueType(I); 6744 auto SE = PSE.getSE(); 6745 6746 unsigned AS = getLoadStoreAddressSpace(I); 6747 Value *Ptr = getLoadStorePointerOperand(I); 6748 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6749 6750 // Figure out whether the access is strided and get the stride value 6751 // if it's known in compile time 6752 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6753 6754 // Get the cost of the scalar memory instruction and address computation. 
6755 InstructionCost Cost = 6756 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6757 6758 // Don't pass *I here, since it is scalar but will actually be part of a 6759 // vectorized loop where the user of it is a vectorized instruction. 6760 const Align Alignment = getLoadStoreAlignment(I); 6761 Cost += VF.getKnownMinValue() * 6762 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6763 AS, TTI::TCK_RecipThroughput); 6764 6765 // Get the overhead of the extractelement and insertelement instructions 6766 // we might create due to scalarization. 6767 Cost += getScalarizationOverhead(I, VF); 6768 6769 // If we have a predicated store, it may not be executed for each vector 6770 // lane. Scale the cost by the probability of executing the predicated 6771 // block. 6772 if (isPredicatedInst(I)) { 6773 Cost /= getReciprocalPredBlockProb(); 6774 6775 if (useEmulatedMaskMemRefHack(I)) 6776 // Artificially setting to a high enough value to practically disable 6777 // vectorization with such operations. 6778 Cost = 3000000; 6779 } 6780 6781 return Cost; 6782 } 6783 6784 InstructionCost 6785 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6786 ElementCount VF) { 6787 Type *ValTy = getMemInstValueType(I); 6788 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6789 Value *Ptr = getLoadStorePointerOperand(I); 6790 unsigned AS = getLoadStoreAddressSpace(I); 6791 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6792 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6793 6794 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6795 "Stride should be 1 or -1 for consecutive memory access"); 6796 const Align Alignment = getLoadStoreAlignment(I); 6797 InstructionCost Cost = 0; 6798 if (Legal->isMaskRequired(I)) 6799 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6800 CostKind); 6801 else 6802 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6803 CostKind, I); 6804 6805 bool Reverse = ConsecutiveStride < 0; 6806 if (Reverse) 6807 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6808 return Cost; 6809 } 6810 6811 InstructionCost 6812 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6813 ElementCount VF) { 6814 assert(Legal->isUniformMemOp(*I)); 6815 6816 Type *ValTy = getMemInstValueType(I); 6817 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6818 const Align Alignment = getLoadStoreAlignment(I); 6819 unsigned AS = getLoadStoreAddressSpace(I); 6820 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6821 if (isa<LoadInst>(I)) { 6822 return TTI.getAddressComputationCost(ValTy) + 6823 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6824 CostKind) + 6825 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6826 } 6827 StoreInst *SI = cast<StoreInst>(I); 6828 6829 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6830 return TTI.getAddressComputationCost(ValTy) + 6831 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6832 CostKind) + 6833 (isLoopInvariantStoreValue 6834 ? 
0 6835 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6836 VF.getKnownMinValue() - 1)); 6837 } 6838 6839 InstructionCost 6840 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6841 ElementCount VF) { 6842 Type *ValTy = getMemInstValueType(I); 6843 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6844 const Align Alignment = getLoadStoreAlignment(I); 6845 const Value *Ptr = getLoadStorePointerOperand(I); 6846 6847 return TTI.getAddressComputationCost(VectorTy) + 6848 TTI.getGatherScatterOpCost( 6849 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6850 TargetTransformInfo::TCK_RecipThroughput, I); 6851 } 6852 6853 InstructionCost 6854 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6855 ElementCount VF) { 6856 // TODO: Once we have support for interleaving with scalable vectors 6857 // we can calculate the cost properly here. 6858 if (VF.isScalable()) 6859 return InstructionCost::getInvalid(); 6860 6861 Type *ValTy = getMemInstValueType(I); 6862 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6863 unsigned AS = getLoadStoreAddressSpace(I); 6864 6865 auto Group = getInterleavedAccessGroup(I); 6866 assert(Group && "Fail to get an interleaved access group."); 6867 6868 unsigned InterleaveFactor = Group->getFactor(); 6869 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 6870 6871 // Holds the indices of existing members in an interleaved load group. 6872 // An interleaved store group doesn't need this as it doesn't allow gaps. 6873 SmallVector<unsigned, 4> Indices; 6874 if (isa<LoadInst>(I)) { 6875 for (unsigned i = 0; i < InterleaveFactor; i++) 6876 if (Group->getMember(i)) 6877 Indices.push_back(i); 6878 } 6879 6880 // Calculate the cost of the whole interleaved group. 6881 bool UseMaskForGaps = 6882 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 6883 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 6884 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6885 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6886 6887 if (Group->isReverse()) { 6888 // TODO: Add support for reversed masked interleaved access. 6889 assert(!Legal->isMaskRequired(I) && 6890 "Reverse masked interleaved access not supported."); 6891 Cost += Group->getNumMembers() * 6892 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6893 } 6894 return Cost; 6895 } 6896 6897 InstructionCost LoopVectorizationCostModel::getReductionPatternCost( 6898 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 6899 // Early exit for no inloop reductions 6900 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 6901 return InstructionCost::getInvalid(); 6902 auto *VectorTy = cast<VectorType>(Ty); 6903 6904 // We are looking for a pattern of, and finding the minimal acceptable cost: 6905 // reduce(mul(ext(A), ext(B))) or 6906 // reduce(mul(A, B)) or 6907 // reduce(ext(A)) or 6908 // reduce(A). 6909 // The basic idea is that we walk down the tree to do that, finding the root 6910 // reduction instruction in InLoopReductionImmediateChains. From there we find 6911 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 6912 // of the components. If the reduction cost is lower, we return it for the 6913 // reduction instruction and 0 for the other instructions in the pattern. If 6914 // it is not, we return an invalid cost specifying that the original cost 6915 // method should be used.
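// For example, given IR along these lines (names and types illustrative):
//   %a.ext = sext i8 %a to i32
//   %b.ext = sext i8 %b to i32
//   %mul = mul i32 %a.ext, %b.ext
//   %sum = add i32 %phi, %mul
// the whole reduce(mul(ext(A), ext(B))) pattern may be costed as one
// extended multiply-accumulate reduction via getExtendedAddReductionCost
// instead of pricing the sext/mul/add nodes individually.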
6916 Instruction *RetI = I; 6917 if ((RetI->getOpcode() == Instruction::SExt || 6918 RetI->getOpcode() == Instruction::ZExt)) { 6919 if (!RetI->hasOneUser()) 6920 return InstructionCost::getInvalid(); 6921 RetI = RetI->user_back(); 6922 } 6923 if (RetI->getOpcode() == Instruction::Mul && 6924 RetI->user_back()->getOpcode() == Instruction::Add) { 6925 if (!RetI->hasOneUser()) 6926 return InstructionCost::getInvalid(); 6927 RetI = RetI->user_back(); 6928 } 6929 6930 // Test if the found instruction is a reduction, and if not return an invalid 6931 // cost specifying the parent to use the original cost modelling. 6932 if (!InLoopReductionImmediateChains.count(RetI)) 6933 return InstructionCost::getInvalid(); 6934 6935 // Find the reduction this chain is a part of and calculate the basic cost of 6936 // the reduction on its own. 6937 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6938 Instruction *ReductionPhi = LastChain; 6939 while (!isa<PHINode>(ReductionPhi)) 6940 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6941 6942 RecurrenceDescriptor RdxDesc = 6943 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 6944 unsigned BaseCost = TTI.getArithmeticReductionCost(RdxDesc.getOpcode(), 6945 VectorTy, false, CostKind); 6946 6947 // Get the operand that was not the reduction chain and match it to one of the 6948 // patterns, returning the better cost if it is found. 6949 Instruction *RedOp = RetI->getOperand(1) == LastChain 6950 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6951 : dyn_cast<Instruction>(RetI->getOperand(1)); 6952 6953 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6954 6955 if (RedOp && (isa<SExtInst>(RedOp) || isa<ZExtInst>(RedOp)) && 6956 !TheLoop->isLoopInvariant(RedOp)) { 6957 bool IsUnsigned = isa<ZExtInst>(RedOp); 6958 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6959 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6960 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6961 CostKind); 6962 6963 unsigned ExtCost = 6964 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6965 TTI::CastContextHint::None, CostKind, RedOp); 6966 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6967 return I == RetI ? *RedCost.getValue() : 0; 6968 } else if (RedOp && RedOp->getOpcode() == Instruction::Mul) { 6969 Instruction *Mul = RedOp; 6970 Instruction *Op0 = dyn_cast<Instruction>(Mul->getOperand(0)); 6971 Instruction *Op1 = dyn_cast<Instruction>(Mul->getOperand(1)); 6972 if (Op0 && Op1 && (isa<SExtInst>(Op0) || isa<ZExtInst>(Op0)) && 6973 Op0->getOpcode() == Op1->getOpcode() && 6974 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6975 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6976 bool IsUnsigned = isa<ZExtInst>(Op0); 6977 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6978 // reduce(mul(ext, ext)) 6979 unsigned ExtCost = 6980 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 6981 TTI::CastContextHint::None, CostKind, Op0); 6982 unsigned MulCost = 6983 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6984 6985 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6986 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6987 CostKind); 6988 6989 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 6990 return I == RetI ? 
*RedCost.getValue() : 0; 6991 } else { 6992 unsigned MulCost = 6993 TTI.getArithmeticInstrCost(Mul->getOpcode(), VectorTy, CostKind); 6994 6995 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6996 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6997 CostKind); 6998 6999 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7000 return I == RetI ? *RedCost.getValue() : 0; 7001 } 7002 } 7003 7004 return I == RetI ? BaseCost : InstructionCost::getInvalid(); 7005 } 7006 7007 InstructionCost 7008 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7009 ElementCount VF) { 7010 // Calculate scalar cost only. Vectorization cost should be ready at this 7011 // moment. 7012 if (VF.isScalar()) { 7013 Type *ValTy = getMemInstValueType(I); 7014 const Align Alignment = getLoadStoreAlignment(I); 7015 unsigned AS = getLoadStoreAddressSpace(I); 7016 7017 return TTI.getAddressComputationCost(ValTy) + 7018 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7019 TTI::TCK_RecipThroughput, I); 7020 } 7021 return getWideningCost(I, VF); 7022 } 7023 7024 LoopVectorizationCostModel::VectorizationCostTy 7025 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7026 ElementCount VF) { 7027 // If we know that this instruction will remain uniform, check the cost of 7028 // the scalar version. 7029 if (isUniformAfterVectorization(I, VF)) 7030 VF = ElementCount::getFixed(1); 7031 7032 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7033 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7034 7035 // Forced scalars do not have any scalarization overhead. 7036 auto ForcedScalar = ForcedScalars.find(VF); 7037 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7038 auto InstSet = ForcedScalar->second; 7039 if (InstSet.count(I)) 7040 return VectorizationCostTy( 7041 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7042 VF.getKnownMinValue()), 7043 false); 7044 } 7045 7046 Type *VectorTy; 7047 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7048 7049 bool TypeNotScalarized = 7050 VF.isVector() && VectorTy->isVectorTy() && 7051 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7052 return VectorizationCostTy(C, TypeNotScalarized); 7053 } 7054 7055 InstructionCost 7056 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7057 ElementCount VF) { 7058 7059 assert(!VF.isScalable() && 7060 "cannot compute scalarization overhead for scalable vectorization"); 7061 if (VF.isScalar()) 7062 return 0; 7063 7064 InstructionCost Cost = 0; 7065 Type *RetTy = ToVectorTy(I->getType(), VF); 7066 if (!RetTy->isVoidTy() && 7067 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7068 Cost += TTI.getScalarizationOverhead( 7069 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7070 true, false); 7071 7072 // Some targets keep addresses scalar. 7073 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7074 return Cost; 7075 7076 // Some targets support efficient element stores. 7077 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7078 return Cost; 7079 7080 // Collect operands to consider. 7081 CallInst *CI = dyn_cast<CallInst>(I); 7082 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7083 7084 // Skip operands that do not require extraction/scalarization and do not incur 7085 // any overhead. 
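// For instance, a loop-invariant operand, or an operand that is itself
// scalar after vectorization, does not live in a vector register, so no
// extractelement is needed for it; filterExtractingOperands is expected to
// drop such operands before the TTI query below.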
7086 return Cost + TTI.getOperandsScalarizationOverhead( 7087 filterExtractingOperands(Ops, VF), VF.getKnownMinValue()); 7088 } 7089 7090 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7091 if (VF.isScalar()) 7092 return; 7093 NumPredStores = 0; 7094 for (BasicBlock *BB : TheLoop->blocks()) { 7095 // For each instruction in the old loop. 7096 for (Instruction &I : *BB) { 7097 Value *Ptr = getLoadStorePointerOperand(&I); 7098 if (!Ptr) 7099 continue; 7100 7101 // TODO: We should generate better code and update the cost model for 7102 // predicated uniform stores. Today they are treated as any other 7103 // predicated store (see added test cases in 7104 // invariant-store-vectorization.ll). 7105 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7106 NumPredStores++; 7107 7108 if (Legal->isUniformMemOp(I)) { 7109 // TODO: Avoid replicating loads and stores instead of 7110 // relying on instcombine to remove them. 7111 // Load: Scalar load + broadcast 7112 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7113 InstructionCost Cost = getUniformMemOpCost(&I, VF); 7114 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7115 continue; 7116 } 7117 7118 // We assume that widening is the best solution when possible. 7119 if (memoryInstructionCanBeWidened(&I, VF)) { 7120 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7121 int ConsecutiveStride = 7122 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 7123 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7124 "Expected consecutive stride."); 7125 InstWidening Decision = 7126 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7127 setWideningDecision(&I, VF, Decision, Cost); 7128 continue; 7129 } 7130 7131 // Choose between Interleaving, Gather/Scatter or Scalarization. 7132 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7133 unsigned NumAccesses = 1; 7134 if (isAccessInterleaved(&I)) { 7135 auto Group = getInterleavedAccessGroup(&I); 7136 assert(Group && "Fail to get an interleaved access group."); 7137 7138 // Make one decision for the whole group. 7139 if (getWideningDecision(&I, VF) != CM_Unknown) 7140 continue; 7141 7142 NumAccesses = Group->getNumMembers(); 7143 if (interleavedAccessCanBeWidened(&I, VF)) 7144 InterleaveCost = getInterleaveGroupCost(&I, VF); 7145 } 7146 7147 InstructionCost GatherScatterCost = 7148 isLegalGatherOrScatter(&I) 7149 ? getGatherScatterCost(&I, VF) * NumAccesses 7150 : InstructionCost::getInvalid(); 7151 7152 InstructionCost ScalarizationCost = 7153 !VF.isScalable() ? getMemInstScalarizationCost(&I, VF) * NumAccesses 7154 : InstructionCost::getInvalid(); 7155 7156 // Choose the better solution for the current VF, 7157 // write down this decision and use it during vectorization. 7158 InstructionCost Cost; 7159 InstWidening Decision; 7160 if (InterleaveCost <= GatherScatterCost && 7161 InterleaveCost < ScalarizationCost) { 7162 Decision = CM_Interleave; 7163 Cost = InterleaveCost; 7164 } else if (GatherScatterCost < ScalarizationCost) { 7165 Decision = CM_GatherScatter; 7166 Cost = GatherScatterCost; 7167 } else { 7168 assert(!VF.isScalable() && 7169 "We cannot yet scalarise for scalable vectors"); 7170 Decision = CM_Scalarize; 7171 Cost = ScalarizationCost; 7172 } 7173 // If the instruction belongs to an interleave group, the whole group 7174 // receives the same decision. The whole group receives the cost, but 7175 // the cost will actually be assigned to one instruction.
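// For example, the accesses A[2*i] and A[2*i+1] would typically form a
// factor-2 interleave group: the group gets a single decision such as
// CM_Interleave, and the combined cost is recorded on one representative
// member rather than being split across all members.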
7176 if (auto Group = getInterleavedAccessGroup(&I)) 7177 setWideningDecision(Group, VF, Decision, Cost); 7178 else 7179 setWideningDecision(&I, VF, Decision, Cost); 7180 } 7181 } 7182 7183 // Make sure that any load of address and any other address computation 7184 // remains scalar unless there is gather/scatter support. This avoids 7185 // inevitable extracts into address registers, and also has the benefit of 7186 // activating LSR more, since that pass can't optimize vectorized 7187 // addresses. 7188 if (TTI.prefersVectorizedAddressing()) 7189 return; 7190 7191 // Start with all scalar pointer uses. 7192 SmallPtrSet<Instruction *, 8> AddrDefs; 7193 for (BasicBlock *BB : TheLoop->blocks()) 7194 for (Instruction &I : *BB) { 7195 Instruction *PtrDef = 7196 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7197 if (PtrDef && TheLoop->contains(PtrDef) && 7198 getWideningDecision(&I, VF) != CM_GatherScatter) 7199 AddrDefs.insert(PtrDef); 7200 } 7201 7202 // Add all instructions used to generate the addresses. 7203 SmallVector<Instruction *, 4> Worklist; 7204 append_range(Worklist, AddrDefs); 7205 while (!Worklist.empty()) { 7206 Instruction *I = Worklist.pop_back_val(); 7207 for (auto &Op : I->operands()) 7208 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7209 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7210 AddrDefs.insert(InstOp).second) 7211 Worklist.push_back(InstOp); 7212 } 7213 7214 for (auto *I : AddrDefs) { 7215 if (isa<LoadInst>(I)) { 7216 // Setting the desired widening decision should ideally be handled 7217 // by cost functions, but since this involves finding out whether the 7218 // loaded register is involved in an address computation, it is 7219 // instead changed here when we know this is the case. 7220 InstWidening Decision = getWideningDecision(I, VF); 7221 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7222 // Scalarize a widened load of address. 7223 setWideningDecision( 7224 I, VF, CM_Scalarize, 7225 (VF.getKnownMinValue() * 7226 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7227 else if (auto Group = getInterleavedAccessGroup(I)) { 7228 // Scalarize an interleave group of address loads. 7229 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7230 if (Instruction *Member = Group->getMember(I)) 7231 setWideningDecision( 7232 Member, VF, CM_Scalarize, 7233 (VF.getKnownMinValue() * 7234 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7235 } 7236 } 7237 } else 7238 // Make sure I gets scalarized and receives a cost estimate without 7239 // scalarization overhead. 7240 ForcedScalars[VF].insert(I); 7241 } 7242 } 7243 7244 InstructionCost 7245 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7246 Type *&VectorTy) { 7247 Type *RetTy = I->getType(); 7248 if (canTruncateToMinimalBitwidth(I, VF)) 7249 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7250 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 7251 auto SE = PSE.getSE(); 7252 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7253 7254 // TODO: We need to estimate the cost of intrinsic calls. 7255 switch (I->getOpcode()) { 7256 case Instruction::GetElementPtr: 7257 // We mark this instruction as zero-cost because the cost of GEPs in 7258 // vectorized code depends on whether the corresponding memory instruction 7259 // is scalarized or not. Therefore, we handle GEPs with the memory 7260 // instruction cost.
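// For example, in
//   %gep = getelementptr inbounds i32, i32* %A, i64 %iv
//   %v = load i32, i32* %gep
// the address computation for %gep is already reflected in the cost
// reported for the load, so charging the GEP here would double-count it.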
7261 return 0; 7262 case Instruction::Br: { 7263 // In cases of scalarized and predicated instructions, there will be VF 7264 // predicated blocks in the vectorized loop. Each branch around these 7265 // blocks also requires an extract of its vector compare i1 element. 7266 bool ScalarPredicatedBB = false; 7267 BranchInst *BI = cast<BranchInst>(I); 7268 if (VF.isVector() && BI->isConditional() && 7269 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7270 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7271 ScalarPredicatedBB = true; 7272 7273 if (ScalarPredicatedBB) { 7274 // Return cost for branches around scalarized and predicated blocks. 7275 assert(!VF.isScalable() && "scalable vectors not yet supported."); 7276 auto *Vec_i1Ty = 7277 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7278 return (TTI.getScalarizationOverhead( 7279 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7280 false, true) + 7281 (TTI.getCFInstrCost(Instruction::Br, CostKind) * 7282 VF.getKnownMinValue())); 7283 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7284 // The back-edge branch will remain, as will all scalar branches. 7285 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7286 else 7287 // This branch will be eliminated by if-conversion. 7288 return 0; 7289 // Note: We currently assume zero cost for an unconditional branch inside 7290 // a predicated block since it will become a fall-through, although we 7291 // may decide in the future to call TTI for all branches. 7292 } 7293 case Instruction::PHI: { 7294 auto *Phi = cast<PHINode>(I); 7295 7296 // First-order recurrences are replaced by vector shuffles inside the loop. 7297 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7298 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7299 return TTI.getShuffleCost( 7300 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7301 VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7302 7303 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7304 // converted into select instructions. We require N - 1 selects per phi 7305 // node, where N is the number of incoming values. 7306 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7307 return (Phi->getNumIncomingValues() - 1) * 7308 TTI.getCmpSelInstrCost( 7309 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7310 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7311 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7312 7313 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7314 } 7315 case Instruction::UDiv: 7316 case Instruction::SDiv: 7317 case Instruction::URem: 7318 case Instruction::SRem: 7319 // If we have a predicated instruction, it may not be executed for each 7320 // vector lane. Get the scalarization cost and scale this amount by the 7321 // probability of executing the predicated block. If the instruction is not 7322 // predicated, we fall through to the next case. 7323 if (VF.isVector() && isScalarWithPredication(I)) { 7324 InstructionCost Cost = 0; 7325 7326 // These instructions have a non-void type, so account for the phi nodes 7327 // that we will create. This cost is likely to be zero. The phi node 7328 // cost, if any, should be scaled by the block probability because it 7329 // models a copy at the end of each predicated block. 7330 Cost += VF.getKnownMinValue() * 7331 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7332 7333 // The cost of the non-predicated instruction.
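// (Illustrative only, assuming unit TTI costs: for VF = 4 a predicated
// sdiv accumulates 4 phi copies, 4 scalar divides, and the scalarization
// overhead added below, and the total is then halved by the assumed 50%
// block probability.)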
7334 Cost += VF.getKnownMinValue() * 7335 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7336 7337 // The cost of insertelement and extractelement instructions needed for 7338 // scalarization. 7339 Cost += getScalarizationOverhead(I, VF); 7340 7341 // Scale the cost by the probability of executing the predicated blocks. 7342 // This assumes the predicated block for each vector lane is equally 7343 // likely. 7344 return Cost / getReciprocalPredBlockProb(); 7345 } 7346 LLVM_FALLTHROUGH; 7347 case Instruction::Add: 7348 case Instruction::FAdd: 7349 case Instruction::Sub: 7350 case Instruction::FSub: 7351 case Instruction::Mul: 7352 case Instruction::FMul: 7353 case Instruction::FDiv: 7354 case Instruction::FRem: 7355 case Instruction::Shl: 7356 case Instruction::LShr: 7357 case Instruction::AShr: 7358 case Instruction::And: 7359 case Instruction::Or: 7360 case Instruction::Xor: { 7361 // Since we will replace the stride by 1 the multiplication should go away. 7362 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7363 return 0; 7364 7365 // Detect reduction patterns 7366 InstructionCost RedCost; 7367 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7368 .isValid()) 7369 return RedCost; 7370 7371 // Certain instructions can be cheaper to vectorize if they have a constant 7372 // second vector operand. One example of this are shifts on x86. 7373 Value *Op2 = I->getOperand(1); 7374 TargetTransformInfo::OperandValueProperties Op2VP; 7375 TargetTransformInfo::OperandValueKind Op2VK = 7376 TTI.getOperandInfo(Op2, Op2VP); 7377 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7378 Op2VK = TargetTransformInfo::OK_UniformValue; 7379 7380 SmallVector<const Value *, 4> Operands(I->operand_values()); 7381 unsigned N = isScalarAfterVectorization(I, VF) ? VF.getKnownMinValue() : 1; 7382 return N * TTI.getArithmeticInstrCost( 7383 I->getOpcode(), VectorTy, CostKind, 7384 TargetTransformInfo::OK_AnyValue, 7385 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7386 } 7387 case Instruction::FNeg: { 7388 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 7389 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF.getKnownMinValue() : 1; 7390 return N * TTI.getArithmeticInstrCost( 7391 I->getOpcode(), VectorTy, CostKind, 7392 TargetTransformInfo::OK_AnyValue, 7393 TargetTransformInfo::OK_AnyValue, 7394 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 7395 I->getOperand(0), I); 7396 } 7397 case Instruction::Select: { 7398 SelectInst *SI = cast<SelectInst>(I); 7399 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7400 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7401 Type *CondTy = SI->getCondition()->getType(); 7402 if (!ScalarCond) 7403 CondTy = VectorType::get(CondTy, VF); 7404 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7405 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7406 } 7407 case Instruction::ICmp: 7408 case Instruction::FCmp: { 7409 Type *ValTy = I->getOperand(0)->getType(); 7410 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7411 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7412 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7413 VectorTy = ToVectorTy(ValTy, VF); 7414 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7415 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7416 } 7417 case Instruction::Store: 7418 case Instruction::Load: { 7419 ElementCount Width = VF; 7420 if (Width.isVector()) { 7421 InstWidening Decision = getWideningDecision(I, Width); 7422 assert(Decision != CM_Unknown && 7423 "CM decision should be taken at this point"); 7424 if (Decision == CM_Scalarize) 7425 Width = ElementCount::getFixed(1); 7426 } 7427 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 7428 return getMemoryInstructionCost(I, VF); 7429 } 7430 case Instruction::ZExt: 7431 case Instruction::SExt: 7432 case Instruction::FPToUI: 7433 case Instruction::FPToSI: 7434 case Instruction::FPExt: 7435 case Instruction::PtrToInt: 7436 case Instruction::IntToPtr: 7437 case Instruction::SIToFP: 7438 case Instruction::UIToFP: 7439 case Instruction::Trunc: 7440 case Instruction::FPTrunc: 7441 case Instruction::BitCast: { 7442 // Computes the CastContextHint from a Load/Store instruction. 7443 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7444 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7445 "Expected a load or a store!"); 7446 7447 if (VF.isScalar() || !TheLoop->contains(I)) 7448 return TTI::CastContextHint::Normal; 7449 7450 switch (getWideningDecision(I, VF)) { 7451 case LoopVectorizationCostModel::CM_GatherScatter: 7452 return TTI::CastContextHint::GatherScatter; 7453 case LoopVectorizationCostModel::CM_Interleave: 7454 return TTI::CastContextHint::Interleave; 7455 case LoopVectorizationCostModel::CM_Scalarize: 7456 case LoopVectorizationCostModel::CM_Widen: 7457 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7458 : TTI::CastContextHint::Normal; 7459 case LoopVectorizationCostModel::CM_Widen_Reverse: 7460 return TTI::CastContextHint::Reversed; 7461 case LoopVectorizationCostModel::CM_Unknown: 7462 llvm_unreachable("Instr did not go through cost modelling?"); 7463 } 7464 7465 llvm_unreachable("Unhandled case!"); 7466 }; 7467 7468 unsigned Opcode = I->getOpcode(); 7469 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7470 // For Trunc, the context is the only user, which must be a StoreInst. 
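// For example, for
//   %t = trunc i32 %x to i16
//   store i16 %t, i16* %p
// the widening decision already taken for the store (widened, masked,
// reversed, gather/scatter) supplies the CastContextHint for the trunc.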
7471 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7472 if (I->hasOneUse()) 7473 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7474 CCH = ComputeCCH(Store); 7475 } 7476 // For Z/Sext, the context is the operand, which must be a LoadInst. 7477 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7478 Opcode == Instruction::FPExt) { 7479 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7480 CCH = ComputeCCH(Load); 7481 } 7482 7483 // We optimize the truncation of induction variables having constant 7484 // integer steps. The cost of these truncations is the same as the scalar 7485 // operation. 7486 if (isOptimizableIVTruncate(I, VF)) { 7487 auto *Trunc = cast<TruncInst>(I); 7488 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7489 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7490 } 7491 7492 // Detect reduction patterns 7493 InstructionCost RedCost; 7494 if ((RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7495 .isValid()) 7496 return RedCost; 7497 7498 Type *SrcScalarTy = I->getOperand(0)->getType(); 7499 Type *SrcVecTy = 7500 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7501 if (canTruncateToMinimalBitwidth(I, VF)) { 7502 // This cast is going to be shrunk. This may remove the cast or it might 7503 // turn it into a slightly different cast. For example, if MinBW == 16, 7504 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7505 // 7506 // Calculate the modified src and dest types. 7507 Type *MinVecTy = VectorTy; 7508 if (Opcode == Instruction::Trunc) { 7509 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7510 VectorTy = 7511 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7512 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7513 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7514 VectorTy = 7515 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7516 } 7517 } 7518 7519 unsigned N; 7520 if (isScalarAfterVectorization(I, VF)) { 7521 assert(!VF.isScalable() && "VF is assumed to be non scalable"); 7522 N = VF.getKnownMinValue(); 7523 } else 7524 N = 1; 7525 return N * 7526 TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7527 } 7528 case Instruction::Call: { 7529 bool NeedToScalarize; 7530 CallInst *CI = cast<CallInst>(I); 7531 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7532 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7533 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7534 return std::min(CallCost, IntrinsicCost); 7535 } 7536 return CallCost; 7537 } 7538 case Instruction::ExtractValue: 7539 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7540 default: 7541 // The cost of executing VF copies of the scalar instruction. This opcode 7542 // is unknown. Assume that it is the same as 'mul'. 7543 return VF.getKnownMinValue() * TTI.getArithmeticInstrCost( 7544 Instruction::Mul, VectorTy, CostKind) + 7545 getScalarizationOverhead(I, VF); 7546 } // end of switch.
7547 } 7548 7549 char LoopVectorize::ID = 0; 7550 7551 static const char lv_name[] = "Loop Vectorization"; 7552 7553 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7554 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7555 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7556 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7557 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7558 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7559 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7560 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7561 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7562 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7563 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7564 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7565 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7566 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7567 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7568 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7569 7570 namespace llvm { 7571 7572 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7573 7574 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7575 bool VectorizeOnlyWhenForced) { 7576 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7577 } 7578 7579 } // end namespace llvm 7580 7581 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7582 // Check if the pointer operand of a load or store instruction is 7583 // consecutive. 7584 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7585 return Legal->isConsecutivePtr(Ptr); 7586 return false; 7587 } 7588 7589 void LoopVectorizationCostModel::collectValuesToIgnore() { 7590 // Ignore ephemeral values. 7591 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7592 7593 // Ignore type-promoting instructions we identified during reduction 7594 // detection. 7595 for (auto &Reduction : Legal->getReductionVars()) { 7596 RecurrenceDescriptor &RedDes = Reduction.second; 7597 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7598 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7599 } 7600 // Ignore type-casting instructions we identified during induction 7601 // detection. 7602 for (auto &Induction : Legal->getInductionVars()) { 7603 InductionDescriptor &IndDes = Induction.second; 7604 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7605 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7606 } 7607 } 7608 7609 void LoopVectorizationCostModel::collectInLoopReductions() { 7610 for (auto &Reduction : Legal->getReductionVars()) { 7611 PHINode *Phi = Reduction.first; 7612 RecurrenceDescriptor &RdxDesc = Reduction.second; 7613 7614 // We don't collect reductions that are type promoted (yet). 7615 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7616 continue; 7617 7618 // If the target would prefer this reduction to happen "in-loop", then we 7619 // want to record it as such. 7620 unsigned Opcode = RdxDesc.getOpcode(); 7621 if (!PreferInLoopReductions && 7622 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7623 TargetTransformInfo::ReductionFlags())) 7624 continue; 7625 7626 // Check that we can correctly put the reductions into the loop, by 7627 // finding the chain of operations that leads from the phi to the loop 7628 // exit value. 
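// For example, for a chain such as (illustrative IR)
//   %red = phi i32 [ 0, %ph ], [ %add2, %loop ]
//   %add1 = add i32 %red, %x
//   %add2 = add i32 %add1, %y
// getReductionOpChain would return {%add1, %add2}, and the immediate-chain
// map built below records %add1 -> %red and %add2 -> %add1, which the cost
// model later walks back to find the reduction phi.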
7629 SmallVector<Instruction *, 4> ReductionOperations = 7630 RdxDesc.getReductionOpChain(Phi, TheLoop); 7631 bool InLoop = !ReductionOperations.empty(); 7632 if (InLoop) { 7633 InLoopReductionChains[Phi] = ReductionOperations; 7634 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7635 Instruction *LastChain = Phi; 7636 for (auto *I : ReductionOperations) { 7637 InLoopReductionImmediateChains[I] = LastChain; 7638 LastChain = I; 7639 } 7640 } 7641 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7642 << " reduction for phi: " << *Phi << "\n"); 7643 } 7644 } 7645 7646 // TODO: we could return a pair of values that specify the max VF and 7647 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7648 // `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment 7649 // doesn't have a cost model that can choose which plan to execute if 7650 // more than one is generated. 7651 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7652 LoopVectorizationCostModel &CM) { 7653 unsigned WidestType; 7654 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7655 return WidestVectorRegBits / WidestType; 7656 } 7657 7658 VectorizationFactor 7659 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7660 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7661 ElementCount VF = UserVF; 7662 // Outer loop handling: outer loops may require CFG and instruction level 7663 // transformations before even evaluating whether vectorization is profitable. 7664 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7665 // the vectorization pipeline. 7666 if (!OrigLoop->isInnermost()) { 7667 // If the user doesn't provide a vectorization factor, determine a 7668 // reasonable one. 7669 if (UserVF.isZero()) { 7670 VF = ElementCount::getFixed( 7671 determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM)); 7672 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7673 7674 // Make sure we have a VF > 1 for stress testing. 7675 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7676 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7677 << "overriding computed VF.\n"); 7678 VF = ElementCount::getFixed(4); 7679 } 7680 } 7681 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7682 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7683 "VF needs to be a power of two"); 7684 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7685 << "VF " << VF << " to build VPlans.\n"); 7686 buildVPlans(VF, VF); 7687 7688 // For VPlan build stress testing, we bail out after VPlan construction. 7689 if (VPlanBuildStressTest) 7690 return VectorizationFactor::Disabled(); 7691 7692 return {VF, 0 /*Cost*/}; 7693 } 7694 7695 LLVM_DEBUG( 7696 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 7697 "VPlan-native path.\n"); 7698 return VectorizationFactor::Disabled(); 7699 } 7700 7701 Optional<VectorizationFactor> 7702 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 7703 assert(OrigLoop->isInnermost() && "Inner loop expected."); 7704 Optional<ElementCount> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC); 7705 if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved. 7706 return None; 7707 7708 // Invalidate interleave groups if all blocks of the loop will be predicated.
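// For example, when the tail is folded by masking, every access in the loop
// becomes a masked access; if the target cannot emit masked interleaved
// loads and stores, keeping the groups would commit us to decisions that
// cannot be honoured during codegen, so the groups (and every decision
// derived from them) are dropped up front.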
7709 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 7710 !useMaskedInterleavedAccesses(*TTI)) { 7711 LLVM_DEBUG( 7712 dbgs() 7713 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 7714 "which requires masked-interleaved support.\n"); 7715 if (CM.InterleaveInfo.invalidateGroups()) 7716 // Invalidating interleave groups also requires invalidating all decisions 7717 // based on them, which includes widening decisions and uniform and scalar 7718 // values. 7719 CM.invalidateCostModelingDecisions(); 7720 } 7721 7722 ElementCount MaxVF = MaybeMaxVF.getValue(); 7723 assert(MaxVF.isNonZero() && "MaxVF is zero."); 7724 7725 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxVF); 7726 if (!UserVF.isZero() && 7727 (UserVFIsLegal || (UserVF.isScalable() && MaxVF.isScalable()))) { 7728 // FIXME: MaxVF is temporarily used in place of UserVF for illegal scalable 7729 // VFs here; this should be reverted to only use legal UserVFs once the 7730 // loop below supports scalable VFs. 7731 ElementCount VF = UserVFIsLegal ? UserVF : MaxVF; 7732 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVFIsLegal ? "user" : "max") 7733 << " VF " << VF << ".\n"); 7734 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7735 "VF needs to be a power of two"); 7736 // Collect the instructions (and their associated costs) that will be more 7737 // profitable to scalarize. 7738 CM.selectUserVectorizationFactor(VF); 7739 CM.collectInLoopReductions(); 7740 buildVPlansWithVPRecipes(VF, VF); 7741 LLVM_DEBUG(printPlans(dbgs())); 7742 return {{VF, 0}}; 7743 } 7744 7745 assert(!MaxVF.isScalable() && 7746 "Scalable vectors not yet supported beyond this point"); 7747 7748 for (ElementCount VF = ElementCount::getFixed(1); 7749 ElementCount::isKnownLE(VF, MaxVF); VF *= 2) { 7750 // Collect Uniform and Scalar instructions after vectorization with VF. 7751 CM.collectUniformsAndScalars(VF); 7752 7753 // Collect the instructions (and their associated costs) that will be more 7754 // profitable to scalarize. 7755 if (VF.isVector()) 7756 CM.collectInstsToScalarize(VF); 7757 } 7758 7759 CM.collectInLoopReductions(); 7760 7761 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxVF); 7762 LLVM_DEBUG(printPlans(dbgs())); 7763 if (MaxVF.isScalar()) 7764 return VectorizationFactor::Disabled(); 7765 7766 // Select the optimal vectorization factor. 7767 return CM.selectVectorizationFactor(MaxVF); 7768 } 7769 7770 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { 7771 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 7772 << '\n'); 7773 BestVF = VF; 7774 BestUF = UF; 7775 7776 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 7777 return !Plan->hasVF(VF); 7778 }); 7779 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 7780 } 7781 7782 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 7783 DominatorTree *DT) { 7784 // Perform the actual loop transformation. 7785 7786 // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7787 VPCallbackILV CallbackILV(ILV); 7788 7789 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 7790 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 7791 7792 VPTransformState State{*BestVF, BestUF, 7793 LI, DT, 7794 ILV.Builder, ILV.VectorLoopValueMap, 7795 &ILV, VPlans.front().get(), 7796 CallbackILV}; 7797 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 7798 State.TripCount = ILV.getOrCreateTripCount(nullptr); 7799 State.CanonicalIV = ILV.Induction; 7800 7801 ILV.printDebugTracesAtStart(); 7802 7803 //===------------------------------------------------===// 7804 // 7805 // Notice: any optimization or new instruction that goes 7806 // into the code below should also be implemented in 7807 // the cost model. 7808 // 7809 //===------------------------------------------------===// 7810 7811 // 2. Copy and widen instructions from the old loop into the new loop. 7812 VPlans.front()->execute(&State); 7813 7814 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7815 // predication, updating analyses. 7816 ILV.fixVectorizedLoop(State); 7817 7818 ILV.printDebugTracesAtEnd(); 7819 } 7820 7821 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 7822 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 7823 7824 // We create new control-flow for the vectorized loop, so an original exit 7825 // condition will be dead after vectorization if it is only used by the 7826 // terminator. 7827 SmallVector<BasicBlock*> ExitingBlocks; 7828 OrigLoop->getExitingBlocks(ExitingBlocks); 7829 for (auto *BB : ExitingBlocks) { 7830 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 7831 if (!Cmp || !Cmp->hasOneUse()) 7832 continue; 7833 7834 // TODO: we should introduce a getUniqueExitingBlocks on Loop 7835 if (!DeadInstructions.insert(Cmp).second) 7836 continue; 7837 7838 // An operand of the icmp is often a dead trunc, used by IndUpdate. 7839 // TODO: can recurse through operands in general 7840 for (Value *Op : Cmp->operands()) { 7841 if (isa<TruncInst>(Op) && Op->hasOneUse()) 7842 DeadInstructions.insert(cast<Instruction>(Op)); 7843 } 7844 } 7845 7846 // We create new "steps" for induction variable updates to which the original 7847 // induction variables map. An original update instruction will be dead if 7848 // all its users except the induction variable are dead. 7849 auto *Latch = OrigLoop->getLoopLatch(); 7850 for (auto &Induction : Legal->getInductionVars()) { 7851 PHINode *Ind = Induction.first; 7852 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 7853 7854 // If the tail is to be folded by masking, the primary induction variable, 7855 // if it exists, isn't dead: it will be used for masking. Don't kill it. 7856 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 7857 continue; 7858 7859 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 7860 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 7861 })) 7862 DeadInstructions.insert(IndUpdate); 7863 7864 // We also record as "Dead" the type-casting instructions we had identified 7865 // during induction analysis. We don't need any handling for them in the 7866 // vectorized loop because we have proven that, under a proper runtime 7867 // test guarding the vectorized loop, the value of the phi, and the casted 7868 // value of the phi, are the same. The last instruction in this casting chain 7869 // will get its scalar/vector/widened def from the scalar/vector/widened def 7870 // of the respective phi node.
Any other casts in the induction def-use chain 7871 // have no other uses outside the phi update chain, and will be ignored. 7872 InductionDescriptor &IndDes = Induction.second; 7873 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7874 DeadInstructions.insert(Casts.begin(), Casts.end()); 7875 } 7876 } 7877 7878 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 7879 7880 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7881 7882 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 7883 Instruction::BinaryOps BinOp) { 7884 // When unrolling and the VF is 1, we only need to add a simple scalar. 7885 Type *Ty = Val->getType(); 7886 assert(!Ty->isVectorTy() && "Val must be a scalar"); 7887 7888 if (Ty->isFloatingPointTy()) { 7889 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 7890 7891 // Floating point operations had to be 'fast' to enable the unrolling. 7892 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 7893 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 7894 } 7895 Constant *C = ConstantInt::get(Ty, StartIdx); 7896 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 7897 } 7898 7899 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7900 SmallVector<Metadata *, 4> MDs; 7901 // Reserve first location for self reference to the LoopID metadata node. 7902 MDs.push_back(nullptr); 7903 bool IsUnrollMetadata = false; 7904 MDNode *LoopID = L->getLoopID(); 7905 if (LoopID) { 7906 // First find existing loop unrolling disable metadata. 7907 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7908 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7909 if (MD) { 7910 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7911 IsUnrollMetadata = 7912 S && S->getString().startswith("llvm.loop.unroll.disable"); 7913 } 7914 MDs.push_back(LoopID->getOperand(i)); 7915 } 7916 } 7917 7918 if (!IsUnrollMetadata) { 7919 // Add runtime unroll disable metadata. 7920 LLVMContext &Context = L->getHeader()->getContext(); 7921 SmallVector<Metadata *, 1> DisableOperands; 7922 DisableOperands.push_back( 7923 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7924 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7925 MDs.push_back(DisableNode); 7926 MDNode *NewLoopID = MDNode::get(Context, MDs); 7927 // Set operand 0 to refer to the loop id itself. 7928 NewLoopID->replaceOperandWith(0, NewLoopID); 7929 L->setLoopID(NewLoopID); 7930 } 7931 } 7932 7933 //===--------------------------------------------------------------------===// 7934 // EpilogueVectorizerMainLoop 7935 //===--------------------------------------------------------------------===// 7936 7937 /// This function is partially responsible for generating the control flow 7938 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7939 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7940 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7941 Loop *Lp = createVectorLoopSkeleton(""); 7942 7943 // Generate the code to check the minimum iteration count of the vector 7944 // epilogue (see below). 7945 EPI.EpilogueIterationCountCheck = 7946 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 7947 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7948 7949 // Generate the code to check any assumptions that we've made for SCEV 7950 // expressions. 
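// For example, if the analysis above only holds under a predicate such as
// "this i32 induction does not wrap when widened to i64", emitSCEVChecks
// materialises that predicate as a runtime test that branches to the
// scalar loop when the assumption fails.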
7951 BasicBlock *SavedPreHeader = LoopVectorPreHeader; 7952 emitSCEVChecks(Lp, LoopScalarPreHeader); 7953 7954 // If a safety check was generated, save it. 7955 if (SavedPreHeader != LoopVectorPreHeader) 7956 EPI.SCEVSafetyCheck = SavedPreHeader; 7957 7958 // Generate the code that checks at runtime if arrays overlap. We put the 7959 // checks into a separate block to make the more common case of few elements 7960 // faster. 7961 SavedPreHeader = LoopVectorPreHeader; 7962 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 7963 7964 // If a safety check was generated, save/overwrite it. 7965 if (SavedPreHeader != LoopVectorPreHeader) 7966 EPI.MemSafetyCheck = SavedPreHeader; 7967 7968 // Generate the iteration count check for the main loop, *after* the check 7969 // for the epilogue loop, so that the path-length is shorter for the case 7970 // that goes directly through the vector epilogue. The longer path length for 7971 // the main loop is compensated for by the gain from vectorizing the larger 7972 // trip count. Note: the branch will get updated later on when we vectorize 7973 // the epilogue. 7974 EPI.MainLoopIterationCountCheck = 7975 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 7976 7977 // Generate the induction variable. 7978 OldInduction = Legal->getPrimaryInduction(); 7979 Type *IdxTy = Legal->getWidestInductionType(); 7980 Value *StartIdx = ConstantInt::get(IdxTy, 0); 7981 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 7982 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 7983 EPI.VectorTripCount = CountRoundDown; 7984 Induction = 7985 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 7986 getDebugLocFromInstOrOperands(OldInduction)); 7987 7988 // Skip creating induction resume values here because they will be created in 7989 // the second pass. If we created them here, they wouldn't be used anyway, 7990 // because the vplan in the second pass still contains the inductions from the 7991 // original loop. 7992 7993 return completeLoopSkeleton(Lp, OrigLoopID); 7994 } 7995 7996 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7997 LLVM_DEBUG({ 7998 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7999 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 8000 << ", Main Loop UF:" << EPI.MainLoopUF 8001 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8002 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8003 }); 8004 } 8005 8006 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8007 DEBUG_WITH_TYPE(VerboseDebug, { 8008 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8009 }); 8010 } 8011 8012 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8013 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8014 assert(L && "Expected valid Loop."); 8015 assert(Bypass && "Expected valid bypass basic block."); 8016 unsigned VFactor = 8017 ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); 8018 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8019 Value *Count = getOrCreateTripCount(L); 8020 // Reuse existing vector loop preheader for TC checks. 8021 // Note that a new preheader block is generated for the vector loop. 8022 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8023 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8024 8025 // Generate code to check if the loop's trip count is less than VF * UF of the 8026 // main vector loop. 8027 auto P = 8028 Cost->requiresScalarEpilogue() ?
ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8029 8030 Value *CheckMinIters = Builder.CreateICmp( 8031 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 8032 "min.iters.check"); 8033 8034 if (!ForEpilogue) 8035 TCCheckBlock->setName("vector.main.loop.iter.check"); 8036 8037 // Create new preheader for vector loop. 8038 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8039 DT, LI, nullptr, "vector.ph"); 8040 8041 if (ForEpilogue) { 8042 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8043 DT->getNode(Bypass)->getIDom()) && 8044 "TC check is expected to dominate Bypass"); 8045 8046 // Update dominator for Bypass & LoopExit. 8047 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8048 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8049 8050 LoopBypassBlocks.push_back(TCCheckBlock); 8051 8052 // Save the trip count so we don't have to regenerate it in the 8053 // vec.epilog.iter.check. This is safe to do because the trip count 8054 // generated here dominates the vector epilog iter check. 8055 EPI.TripCount = Count; 8056 } 8057 8058 ReplaceInstWithInst( 8059 TCCheckBlock->getTerminator(), 8060 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8061 8062 return TCCheckBlock; 8063 } 8064 8065 //===--------------------------------------------------------------------===// 8066 // EpilogueVectorizerEpilogueLoop 8067 //===--------------------------------------------------------------------===// 8068 8069 /// This function is partially responsible for generating the control flow 8070 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8071 BasicBlock * 8072 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8073 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8074 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8075 8076 // Now, compare the remaining count and if there aren't enough iterations to 8077 // execute the vectorized epilogue skip to the scalar part. 8078 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8079 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8080 LoopVectorPreHeader = 8081 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8082 LI, nullptr, "vec.epilog.ph"); 8083 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8084 VecEpilogueIterationCountCheck); 8085 8086 // Adjust the control flow taking the state info from the main loop 8087 // vectorization into account. 
8088 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8089 "expected this to be saved from the previous pass."); 8090 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8091 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8092 8093 DT->changeImmediateDominator(LoopVectorPreHeader, 8094 EPI.MainLoopIterationCountCheck); 8095 8096 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8097 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8098 8099 if (EPI.SCEVSafetyCheck) 8100 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8101 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8102 if (EPI.MemSafetyCheck) 8103 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8104 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8105 8106 DT->changeImmediateDominator( 8107 VecEpilogueIterationCountCheck, 8108 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8109 8110 DT->changeImmediateDominator(LoopScalarPreHeader, 8111 EPI.EpilogueIterationCountCheck); 8112 DT->changeImmediateDominator(LoopExitBlock, EPI.EpilogueIterationCountCheck); 8113 8114 // Keep track of bypass blocks, as they feed start values to the induction 8115 // phis in the scalar loop preheader. 8116 if (EPI.SCEVSafetyCheck) 8117 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 8118 if (EPI.MemSafetyCheck) 8119 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 8120 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 8121 8122 // Generate a resume induction for the vector epilogue and put it in the 8123 // vector epilogue preheader 8124 Type *IdxTy = Legal->getWidestInductionType(); 8125 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 8126 LoopVectorPreHeader->getFirstNonPHI()); 8127 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 8128 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 8129 EPI.MainLoopIterationCountCheck); 8130 8131 // Generate the induction variable. 8132 OldInduction = Legal->getPrimaryInduction(); 8133 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8134 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8135 Value *StartIdx = EPResumeVal; 8136 Induction = 8137 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8138 getDebugLocFromInstOrOperands(OldInduction)); 8139 8140 // Generate induction resume values. These variables save the new starting 8141 // indexes for the scalar loop. They are used to test if there are any tail 8142 // iterations left once the vector loop has completed. 8143 // Note that when the vectorized epilogue is skipped due to iteration count 8144 // check, then the resume value for the induction variable comes from 8145 // the trip count of the main vector loop, hence passing the AdditionalBypass 8146 // argument. 
8147 createInductionResumeValues(Lp, CountRoundDown,
8148 {VecEpilogueIterationCountCheck,
8149 EPI.VectorTripCount} /* AdditionalBypass */);
8150
8151 AddRuntimeUnrollDisableMetaData(Lp);
8152 return completeLoopSkeleton(Lp, OrigLoopID);
8153 }
8154
8155 BasicBlock *
8156 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8157 Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8158
8159 assert(EPI.TripCount &&
8160 "Expected trip count to have been saved in the first pass.");
8161 assert(
8162 (!isa<Instruction>(EPI.TripCount) ||
8163 DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8164 "saved trip count does not dominate insertion point.");
8165 Value *TC = EPI.TripCount;
8166 IRBuilder<> Builder(Insert->getTerminator());
8167 Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8168
8169 // Generate code to check if the loop's trip count is less than VF * UF of the
8170 // vector epilogue loop.
8171 auto P =
8172 Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8173
8174 Value *CheckMinIters = Builder.CreateICmp(
8175 P, Count,
8176 ConstantInt::get(Count->getType(),
8177 EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF),
8178 "min.epilog.iters.check");
8179
8180 ReplaceInstWithInst(
8181 Insert->getTerminator(),
8182 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8183
8184 LoopBypassBlocks.push_back(Insert);
8185 return Insert;
8186 }
8187
8188 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8189 LLVM_DEBUG({
8190 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8191 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue()
8192 << ", Main Loop UF:" << EPI.MainLoopUF
8193 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue()
8194 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8195 });
8196 }
8197
8198 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8199 DEBUG_WITH_TYPE(VerboseDebug, {
8200 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
8201 });
8202 }
8203
8204 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8205 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8206 assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8207 bool PredicateAtRangeStart = Predicate(Range.Start);
8208
8209 for (ElementCount TmpVF = Range.Start * 2;
8210 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8211 if (Predicate(TmpVF) != PredicateAtRangeStart) {
8212 Range.End = TmpVF;
8213 break;
8214 }
8215
8216 return PredicateAtRangeStart;
8217 }
8218
8219 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8220 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8221 /// of VF's starting at a given VF and extending it as much as possible. Each
8222 /// vectorization decision can potentially shorten this sub-range during
8223 /// buildVPlan().
8224 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8225 ElementCount MaxVF) {
8226 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8227 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8228 VFRange SubRange = {VF, MaxVFPlusOne};
8229 VPlans.push_back(buildVPlan(SubRange));
8230 VF = SubRange.End;
8231 }
8232 }
8233
8234 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8235 VPlanPtr &Plan) {
8236 assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8237
8238 // Look for cached value.
8239 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8240 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8241 if (ECEntryIt != EdgeMaskCache.end())
8242 return ECEntryIt->second;
8243
8244 VPValue *SrcMask = createBlockInMask(Src, Plan);
8245
8246 // The terminator has to be a branch inst!
8247 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8248 assert(BI && "Unexpected terminator found");
8249
8250 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8251 return EdgeMaskCache[Edge] = SrcMask;
8252
8253 // If source is an exiting block, we know the exit edge is dynamically dead
8254 // in the vector loop, and thus we don't need to restrict the mask. Avoid
8255 // adding uses of an otherwise potentially dead instruction.
8256 if (OrigLoop->isLoopExiting(Src))
8257 return EdgeMaskCache[Edge] = SrcMask;
8258
8259 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8260 assert(EdgeMask && "No Edge Mask found for condition");
8261
8262 if (BI->getSuccessor(0) != Dst)
8263 EdgeMask = Builder.createNot(EdgeMask);
8264
8265 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
8266 EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
8267
8268 return EdgeMaskCache[Edge] = EdgeMask;
8269 }
8270
8271 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8272 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8273
8274 // Look for cached value.
8275 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8276 if (BCEntryIt != BlockMaskCache.end())
8277 return BCEntryIt->second;
8278
8279 // All-one mask is modelled as no-mask following the convention for masked
8280 // load/store/gather/scatter. Initialize BlockMask to no-mask.
8281 VPValue *BlockMask = nullptr;
8282
8283 if (OrigLoop->getHeader() == BB) {
8284 if (!CM.blockNeedsPredication(BB))
8285 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8286
8287 // Create the block in mask as the first non-phi instruction in the block.
8288 VPBuilder::InsertPointGuard Guard(Builder);
8289 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8290 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8291
8292 // Introduce the early-exit compare IV <= BTC to form header block mask.
8293 // This is used instead of IV < TC because TC may wrap, unlike BTC.
8294 // Start by constructing the desired canonical IV.
8295 VPValue *IV = nullptr;
8296 if (Legal->getPrimaryInduction())
8297 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8298 else {
8299 auto IVRecipe = new VPWidenCanonicalIVRecipe();
8300 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8301 IV = IVRecipe->getVPValue();
8302 }
8303 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8304 bool TailFolded = !CM.isScalarEpilogueAllowed();
8305
8306 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8307 // While ActiveLaneMask is a binary op that consumes the loop tripcount
8308 // as a second argument, we only pass the IV here and extract the
8309 // tripcount from the transform state where codegen of the VP instructions
8310 // happens.
8311 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8312 } else {
8313 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8314 }
8315 return BlockMaskCache[BB] = BlockMask;
8316 }
8317
8318 // This is the block mask. We OR all incoming edges.
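// For example (hypothetical CFG): if BB has predecessors P0 and P1, the loop
// below produces mask(BB) = or(edgemask(P0->BB), edgemask(P1->BB)). If any
// incoming edge mask is null (i.e. all-one), BB's mask is all-one as well,
// which the early return below encodes by returning the null mask.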
8319 for (auto *Predecessor : predecessors(BB)) { 8320 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8321 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8322 return BlockMaskCache[BB] = EdgeMask; 8323 8324 if (!BlockMask) { // BlockMask has its initialized nullptr value. 8325 BlockMask = EdgeMask; 8326 continue; 8327 } 8328 8329 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8330 } 8331 8332 return BlockMaskCache[BB] = BlockMask; 8333 } 8334 8335 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 8336 VPlanPtr &Plan) { 8337 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8338 "Must be called with either a load or store"); 8339 8340 auto willWiden = [&](ElementCount VF) -> bool { 8341 if (VF.isScalar()) 8342 return false; 8343 LoopVectorizationCostModel::InstWidening Decision = 8344 CM.getWideningDecision(I, VF); 8345 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8346 "CM decision should be taken at this point."); 8347 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8348 return true; 8349 if (CM.isScalarAfterVectorization(I, VF) || 8350 CM.isProfitableToScalarize(I, VF)) 8351 return false; 8352 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8353 }; 8354 8355 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8356 return nullptr; 8357 8358 VPValue *Mask = nullptr; 8359 if (Legal->isMaskRequired(I)) 8360 Mask = createBlockInMask(I->getParent(), Plan); 8361 8362 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 8363 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8364 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 8365 8366 StoreInst *Store = cast<StoreInst>(I); 8367 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 8368 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 8369 } 8370 8371 VPWidenIntOrFpInductionRecipe * 8372 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, VPlan &Plan) const { 8373 // Check if this is an integer or fp induction. If so, build the recipe that 8374 // produces its scalar and vector values. 8375 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8376 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8377 II.getKind() == InductionDescriptor::IK_FpInduction) { 8378 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8379 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8380 return new VPWidenIntOrFpInductionRecipe( 8381 Phi, Start, Casts.empty() ? nullptr : Casts.front()); 8382 } 8383 8384 return nullptr; 8385 } 8386 8387 VPWidenIntOrFpInductionRecipe * 8388 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, VFRange &Range, 8389 VPlan &Plan) const { 8390 // Optimize the special case where the source is a constant integer 8391 // induction variable. Notice that we can only optimize the 'trunc' case 8392 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8393 // (c) other casts depend on pointer size. 8394 8395 // Determine whether \p K is a truncation based on an induction variable that 8396 // can be optimized. 
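// For instance (illustrative IR): given a primary induction %iv of type i64,
// a use such as %t = trunc i64 %iv to i32 can be computed directly as a
// narrower i32 induction for those VFs where the cost model considers the
// truncation optimizable.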
auto isOptimizableIVTruncate =
8398 [&](Instruction *K) -> std::function<bool(ElementCount)> {
8399 return [=](ElementCount VF) -> bool {
8400 return CM.isOptimizableIVTruncate(K, VF);
8401 };
8402 };
8403
8404 if (LoopVectorizationPlanner::getDecisionAndClampRange(
8405 isOptimizableIVTruncate(I), Range)) {
8406
8407 InductionDescriptor II =
8408 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8409 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8410 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8411 Start, nullptr, I);
8412 }
8413 return nullptr;
8414 }
8415
8416 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
8417 // We know that all PHIs in non-header blocks are converted into selects, so
8418 // we don't have to worry about the insertion order and we can just use the
8419 // builder. At this point we generate the predication tree. There may be
8420 // duplications since this is a simple recursive scan, but future
8421 // optimizations will clean it up.
8422
8423 SmallVector<VPValue *, 2> Operands;
8424 unsigned NumIncoming = Phi->getNumIncomingValues();
8425 for (unsigned In = 0; In < NumIncoming; In++) {
8426 VPValue *EdgeMask =
8427 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8428 assert((EdgeMask || NumIncoming == 1) &&
8429 "Multiple predecessors with one having a full mask");
8430 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
8431 if (EdgeMask)
8432 Operands.push_back(EdgeMask);
8433 }
8434 return new VPBlendRecipe(Phi, Operands);
8435 }
8436
8437 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
8438 VPlan &Plan) const {
8439
8440 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8441 [this, CI](ElementCount VF) {
8442 return CM.isScalarWithPredication(CI, VF);
8443 },
8444 Range);
8445
8446 if (IsPredicated)
8447 return nullptr;
8448
8449 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8450 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8451 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8452 ID == Intrinsic::pseudoprobe ||
8453 ID == Intrinsic::experimental_noalias_scope_decl))
8454 return nullptr;
8455
8456 auto willWiden = [&](ElementCount VF) -> bool {
8457 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8458 // The following case may be scalarized depending on the VF.
8459 // The flag shows whether we use an Intrinsic or a usual Call for the
8460 // vectorized version of the instruction.
8461 // Is it beneficial to perform an intrinsic call compared to a lib call?
8462 bool NeedToScalarize = false;
8463 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8464 InstructionCost IntrinsicCost = ID ?
CM.getVectorIntrinsicCost(CI, VF) : 0; 8465 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8466 assert(IntrinsicCost.isValid() && CallCost.isValid() && 8467 "Cannot have invalid costs while widening"); 8468 return UseVectorIntrinsic || !NeedToScalarize; 8469 }; 8470 8471 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8472 return nullptr; 8473 8474 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 8475 } 8476 8477 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8478 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8479 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8480 // Instruction should be widened, unless it is scalar after vectorization, 8481 // scalarization is profitable or it is predicated. 8482 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8483 return CM.isScalarAfterVectorization(I, VF) || 8484 CM.isProfitableToScalarize(I, VF) || 8485 CM.isScalarWithPredication(I, VF); 8486 }; 8487 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8488 Range); 8489 } 8490 8491 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 8492 auto IsVectorizableOpcode = [](unsigned Opcode) { 8493 switch (Opcode) { 8494 case Instruction::Add: 8495 case Instruction::And: 8496 case Instruction::AShr: 8497 case Instruction::BitCast: 8498 case Instruction::FAdd: 8499 case Instruction::FCmp: 8500 case Instruction::FDiv: 8501 case Instruction::FMul: 8502 case Instruction::FNeg: 8503 case Instruction::FPExt: 8504 case Instruction::FPToSI: 8505 case Instruction::FPToUI: 8506 case Instruction::FPTrunc: 8507 case Instruction::FRem: 8508 case Instruction::FSub: 8509 case Instruction::ICmp: 8510 case Instruction::IntToPtr: 8511 case Instruction::LShr: 8512 case Instruction::Mul: 8513 case Instruction::Or: 8514 case Instruction::PtrToInt: 8515 case Instruction::SDiv: 8516 case Instruction::Select: 8517 case Instruction::SExt: 8518 case Instruction::Shl: 8519 case Instruction::SIToFP: 8520 case Instruction::SRem: 8521 case Instruction::Sub: 8522 case Instruction::Trunc: 8523 case Instruction::UDiv: 8524 case Instruction::UIToFP: 8525 case Instruction::URem: 8526 case Instruction::Xor: 8527 case Instruction::ZExt: 8528 return true; 8529 } 8530 return false; 8531 }; 8532 8533 if (!IsVectorizableOpcode(I->getOpcode())) 8534 return nullptr; 8535 8536 // Success: widen this instruction. 8537 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 8538 } 8539 8540 VPBasicBlock *VPRecipeBuilder::handleReplication( 8541 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8542 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 8543 VPlanPtr &Plan) { 8544 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8545 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8546 Range); 8547 8548 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8549 [&](ElementCount VF) { return CM.isScalarWithPredication(I, VF); }, 8550 Range); 8551 8552 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8553 IsUniform, IsPredicated); 8554 setRecipe(I, Recipe); 8555 Plan->addVPValue(I, Recipe); 8556 8557 // Find if I uses a predicated instruction. If so, it will use its scalar 8558 // value. Avoid hoisting the insert-element which packs the scalar value into 8559 // a vector value, as that happens iff all users use the vector value. 
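// For illustration (hypothetical ingredient): if a predicated
// %div = sdiv ... has a replicated user that consumes it as a scalar, its
// lanes need not be packed into a vector inside the predicated block, so the
// loop below clears the AlsoPack flag on %div's recipe.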
8560 for (auto &Op : I->operands())
8561 if (auto *PredInst = dyn_cast<Instruction>(Op))
8562 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
8563 PredInst2Recipe[PredInst]->setAlsoPack(false);
8564
8565 // Finalize the recipe for Instr, handling the non-predicated case first.
8566 if (!IsPredicated) {
8567 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8568 VPBB->appendRecipe(Recipe);
8569 return VPBB;
8570 }
8571 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8572 assert(VPBB->getSuccessors().empty() &&
8573 "VPBB has successors when handling predicated replication.");
8574 // Record predicated instructions for the packing optimizations above.
8575 PredInst2Recipe[I] = Recipe;
8576 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8577 VPBlockUtils::insertBlockAfter(Region, VPBB);
8578 auto *RegSucc = new VPBasicBlock();
8579 VPBlockUtils::insertBlockAfter(RegSucc, Region);
8580 return RegSucc;
8581 }
8582
8583 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8584 VPRecipeBase *PredRecipe,
8585 VPlanPtr &Plan) {
8586 // Instructions marked for predication are replicated and placed under an
8587 // if-then construct to prevent side-effects.
8588
8589 // Generate recipes to compute the block mask for this region.
8590 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8591
8592 // Build the triangular if-then region.
8593 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8594 assert(Instr->getParent() && "Predicated instruction not in any basic block");
8595 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8596 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8597 auto *PHIRecipe = Instr->getType()->isVoidTy()
8598 ? nullptr
8599 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8600 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8601 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8602 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8603
8604 // Note: first set Entry as region entry and then connect successors starting
8605 // from it in order, to propagate the "parent" of each VPBasicBlock.
8606 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8607 VPBlockUtils::connectBlocks(Pred, Exit);
8608
8609 return Region;
8610 }
8611
8612 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8613 VFRange &Range,
8614 VPlanPtr &Plan) {
8615 // First, check for specific widening recipes that deal with calls, memory
8616 // operations, inductions and Phi nodes.
8617 if (auto *CI = dyn_cast<CallInst>(Instr))
8618 return tryToWidenCall(CI, Range, *Plan);
8619
8620 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8621 return tryToWidenMemory(Instr, Range, Plan);
8622
8623 VPRecipeBase *Recipe;
8624 if (auto Phi = dyn_cast<PHINode>(Instr)) {
8625 if (Phi->getParent() != OrigLoop->getHeader())
8626 return tryToBlend(Phi, Plan);
8627 if ((Recipe = tryToOptimizeInductionPHI(Phi, *Plan)))
8628 return Recipe;
8629
8630 if (Legal->isReductionVariable(Phi)) {
8631 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8632 VPValue *StartV =
8633 Plan->getOrAddVPValue(RdxDesc.getRecurrenceStartValue());
8634 return new VPWidenPHIRecipe(Phi, RdxDesc, *StartV);
8635 }
8636
8637 return new VPWidenPHIRecipe(Phi);
8638 }
8639
8640 if (isa<TruncInst>(Instr) && (Recipe = tryToOptimizeInductionTruncate(
8641 cast<TruncInst>(Instr), Range, *Plan)))
8642 return Recipe;
8643
8644 if (!shouldWiden(Instr, Range))
8645 return nullptr;
8646
8647 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8648 return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()),
8649 OrigLoop);
8650
8651 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8652 bool InvariantCond =
8653 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8654 return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()),
8655 InvariantCond);
8656 }
8657
8658 return tryToWiden(Instr, *Plan);
8659 }
8660
8661 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8662 ElementCount MaxVF) {
8663 assert(OrigLoop->isInnermost() && "Inner loop expected.");
8664
8665 // Collect instructions from the original loop that will become trivially dead
8666 // in the vectorized loop. We don't need to vectorize these instructions. For
8667 // example, original induction update instructions can become dead because we
8668 // separately emit induction "steps" when generating code for the new loop.
8669 // Similarly, we create a new latch condition when setting up the structure
8670 // of the new loop, so the old one can become dead.
8671 SmallPtrSet<Instruction *, 4> DeadInstructions;
8672 collectTriviallyDeadInstructions(DeadInstructions);
8673
8674 // Add assume instructions we need to drop to DeadInstructions, to prevent
8675 // them from being added to the VPlan.
8676 // TODO: We only need to drop assumes in blocks that get flattened. If the
8677 // control flow is preserved, we should keep them.
8678 auto &ConditionalAssumes = Legal->getConditionalAssumes();
8679 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8680
8681 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8682 // Dead instructions do not need sinking. Remove them from SinkAfter.
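// E.g. (hypothetical entry): if %iv.next was recorded in SinkAfter but was
// just classified as trivially dead above, no recipe will ever be built for
// it, so keeping its entry would make the sink-after legalization during
// VPlan construction look up a recipe that does not exist.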
8683 for (Instruction *I : DeadInstructions)
8684 SinkAfter.erase(I);
8685
8686 auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8687 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8688 VFRange SubRange = {VF, MaxVFPlusOne};
8689 VPlans.push_back(
8690 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
8691 VF = SubRange.End;
8692 }
8693 }
8694
8695 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
8696 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
8697 const DenseMap<Instruction *, Instruction *> &SinkAfter) {
8698
8699 // Hold a mapping from predicated instructions to their recipes, in order to
8700 // fix their AlsoPack behavior if a user is determined to replicate and use a
8701 // scalar instead of vector value.
8702 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
8703
8704 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
8705
8706 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
8707
8708 // ---------------------------------------------------------------------------
8709 // Pre-construction: record ingredients whose recipes we'll need to further
8710 // process after constructing the initial VPlan.
8711 // ---------------------------------------------------------------------------
8712
8713 // Mark instructions we'll need to sink later and their targets as
8714 // ingredients whose recipe we'll need to record.
8715 for (auto &Entry : SinkAfter) {
8716 RecipeBuilder.recordRecipeOf(Entry.first);
8717 RecipeBuilder.recordRecipeOf(Entry.second);
8718 }
8719 for (auto &Reduction : CM.getInLoopReductionChains()) {
8720 PHINode *Phi = Reduction.first;
8721 RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
8722 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8723
8724 RecipeBuilder.recordRecipeOf(Phi);
8725 for (auto &R : ReductionOperations) {
8726 RecipeBuilder.recordRecipeOf(R);
8727 // For min/max reductions, where we have a pair of icmp/select, we also
8728 // need to record the ICmp recipe, so it can be removed later.
8729 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
8730 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
8731 }
8732 }
8733
8734 // For each interleave group which is relevant for this (possibly trimmed)
8735 // Range, add it to the set of groups to be later applied to the VPlan and add
8736 // placeholders for its members' Recipes which we'll be replacing with a
8737 // single VPInterleaveRecipe.
8738 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
8739 auto applyIG = [IG, this](ElementCount VF) -> bool {
8740 return (VF.isVector() && // Query is illegal for VF == 1
8741 CM.getWideningDecision(IG->getInsertPos(), VF) ==
8742 LoopVectorizationCostModel::CM_Interleave);
8743 };
8744 if (!getDecisionAndClampRange(applyIG, Range))
8745 continue;
8746 InterleaveGroups.insert(IG);
8747 for (unsigned i = 0; i < IG->getFactor(); i++)
8748 if (Instruction *Member = IG->getMember(i))
8749 RecipeBuilder.recordRecipeOf(Member);
8750 }
8751
8752 // ---------------------------------------------------------------------------
8753 // Build initial VPlan: Scan the body of the loop in a topological order to
8754 // visit each basic block after having visited its predecessor basic blocks.
8755 // ---------------------------------------------------------------------------
8756
8757 // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
8758 auto Plan = std::make_unique<VPlan>();
8759 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
8760 Plan->setEntry(VPBB);
8761
8762 // Scan the body of the loop in a topological order to visit each basic block
8763 // after having visited its predecessor basic blocks.
8764 LoopBlocksDFS DFS(OrigLoop);
8765 DFS.perform(LI);
8766
8767 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
8768 // Relevant instructions from basic block BB will be grouped into VPRecipe
8769 // ingredients and fill a new VPBasicBlock.
8770 unsigned VPBBsForBB = 0;
8771 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
8772 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
8773 VPBB = FirstVPBBForBB;
8774 Builder.setInsertPoint(VPBB);
8775
8776 // Introduce each ingredient into VPlan.
8777 // TODO: Model and preserve debug intrinsics in VPlan.
8778 for (Instruction &I : BB->instructionsWithoutDebug()) {
8779 Instruction *Instr = &I;
8780
8781 // First filter out irrelevant instructions, to ensure no recipes are
8782 // built for them.
8783 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
8784 continue;
8785
8786 if (auto Recipe =
8787 RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
8788 for (auto *Def : Recipe->definedValues()) {
8789 auto *UV = Def->getUnderlyingValue();
8790 Plan->addVPValue(UV, Def);
8791 }
8792
8793 RecipeBuilder.setRecipe(Instr, Recipe);
8794 VPBB->appendRecipe(Recipe);
8795 continue;
8796 }
8797
8798 // Otherwise, if all widening options failed, Instruction is to be
8799 // replicated. This may create a successor for VPBB.
8800 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
8801 Instr, Range, VPBB, PredInst2Recipe, Plan);
8802 if (NextVPBB != VPBB) {
8803 VPBB = NextVPBB;
8804 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
8805 : "");
8806 }
8807 }
8808 }
8809
8810 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
8811 // may also be empty, such as the last one, VPBB, reflecting original
8812 // basic-blocks with no recipes.
8813 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
8814 assert(PreEntry->empty() && "Expecting empty pre-entry block.");
8815 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
8816 VPBlockUtils::disconnectBlocks(PreEntry, Entry);
8817 delete PreEntry;
8818
8819 // ---------------------------------------------------------------------------
8820 // Transform initial VPlan: Apply previously taken decisions, in order, to
8821 // bring the VPlan to its final state.
8822 // ---------------------------------------------------------------------------
8823
8824 // Apply Sink-After legal constraints.
8825 for (auto &Entry : SinkAfter) {
8826 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
8827 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
8828 // If the target is in a replication region, make sure to move Sink to the
8829 // block after it, not into the replication region itself.
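// E.g. (hypothetical VPlan blocks): if Target lives in the "pred.store.if"
// block of a replicate region, Sink is moved to the first non-phi slot of the
// region's single successor VPBasicBlock rather than into the region itself,
// keeping the region single-entry single-exit.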
8830 if (auto *Region = 8831 dyn_cast_or_null<VPRegionBlock>(Target->getParent()->getParent())) { 8832 if (Region->isReplicator()) { 8833 assert(Region->getNumSuccessors() == 1 && "Expected SESE region!"); 8834 VPBasicBlock *NextBlock = 8835 cast<VPBasicBlock>(Region->getSuccessors().front()); 8836 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8837 continue; 8838 } 8839 } 8840 Sink->moveAfter(Target); 8841 } 8842 8843 // Interleave memory: for each Interleave Group we marked earlier as relevant 8844 // for this VPlan, replace the Recipes widening its memory instructions with a 8845 // single VPInterleaveRecipe at its insertion point. 8846 for (auto IG : InterleaveGroups) { 8847 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8848 RecipeBuilder.getRecipe(IG->getInsertPos())); 8849 SmallVector<VPValue *, 4> StoredValues; 8850 for (unsigned i = 0; i < IG->getFactor(); ++i) 8851 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) 8852 StoredValues.push_back(Plan->getOrAddVPValue(SI->getOperand(0))); 8853 8854 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 8855 Recipe->getMask()); 8856 VPIG->insertBefore(Recipe); 8857 unsigned J = 0; 8858 for (unsigned i = 0; i < IG->getFactor(); ++i) 8859 if (Instruction *Member = IG->getMember(i)) { 8860 if (!Member->getType()->isVoidTy()) { 8861 VPValue *OriginalV = Plan->getVPValue(Member); 8862 Plan->removeVPValueFor(Member); 8863 Plan->addVPValue(Member, VPIG->getVPValue(J)); 8864 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 8865 J++; 8866 } 8867 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 8868 } 8869 } 8870 8871 // Adjust the recipes for any inloop reductions. 8872 if (Range.Start.isVector()) 8873 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 8874 8875 // Finally, if tail is folded by masking, introduce selects between the phi 8876 // and the live-out instruction of each reduction, at the end of the latch. 8877 if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) { 8878 Builder.setInsertPoint(VPBB); 8879 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 8880 for (auto &Reduction : Legal->getReductionVars()) { 8881 if (CM.isInLoopReduction(Reduction.first)) 8882 continue; 8883 VPValue *Phi = Plan->getOrAddVPValue(Reduction.first); 8884 VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr()); 8885 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 8886 } 8887 } 8888 8889 std::string PlanName; 8890 raw_string_ostream RSO(PlanName); 8891 ElementCount VF = Range.Start; 8892 Plan->addVF(VF); 8893 RSO << "Initial VPlan for VF={" << VF; 8894 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 8895 Plan->addVF(VF); 8896 RSO << "," << VF; 8897 } 8898 RSO << "},UF>=1"; 8899 RSO.flush(); 8900 Plan->setName(PlanName); 8901 8902 return Plan; 8903 } 8904 8905 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 8906 // Outer loop handling: They may require CFG and instruction level 8907 // transformations before even evaluating whether vectorization is profitable. 8908 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 8909 // the vectorization pipeline. 
8910 assert(!OrigLoop->isInnermost());
8911 assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
8912
8913 // Create new empty VPlan
8914 auto Plan = std::make_unique<VPlan>();
8915
8916 // Build hierarchical CFG
8917 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
8918 HCFGBuilder.buildHierarchicalCFG();
8919
8920 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
8921 VF *= 2)
8922 Plan->addVF(VF);
8923
8924 if (EnableVPlanPredication) {
8925 VPlanPredicator VPP(*Plan);
8926 VPP.predicate();
8927
8928 // Avoid running transformation to recipes until masked code generation in
8929 // VPlan-native path is in place.
8930 return Plan;
8931 }
8932
8933 SmallPtrSet<Instruction *, 1> DeadInstructions;
8934 VPlanTransforms::VPInstructionsToVPRecipes(
8935 OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
8936 return Plan;
8937 }
8938
8939 // Adjust the recipes for any inloop reductions. The chain of instructions
8940 // leading from the loop exit instr to the phi needs to be converted to
8941 // reductions, with one operand being vector and the other being the scalar
8942 // reduction chain.
8943 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
8944 VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
8945 for (auto &Reduction : CM.getInLoopReductionChains()) {
8946 PHINode *Phi = Reduction.first;
8947 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8948 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
8949
8950 // ReductionOperations are ordered top-down from the phi's use to the
8951 // LoopExitValue. We keep track of the previous item (the Chain) to tell
8952 // which of the two operands will remain scalar and which will be reduced.
8953 // For min/max, the chain will be the select instructions.
8954 Instruction *Chain = Phi;
8955 for (Instruction *R : ReductionOperations) {
8956 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
8957 RecurKind Kind = RdxDesc.getRecurrenceKind();
8958
8959 VPValue *ChainOp = Plan->getVPValue(Chain);
8960 unsigned FirstOpId;
8961 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8962 assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
8963 "Expected to replace a VPWidenSelectSC");
8964 FirstOpId = 1;
8965 } else {
8966 assert(isa<VPWidenRecipe>(WidenRecipe) &&
8967 "Expected to replace a VPWidenSC");
8968 FirstOpId = 0;
8969 }
8970 unsigned VecOpId =
8971 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
8972 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
8973
8974 auto *CondOp = CM.foldTailByMasking()
8975 ?
RecipeBuilder.createBlockInMask(R->getParent(), Plan)
8976 : nullptr;
8977 VPReductionRecipe *RedRecipe = new VPReductionRecipe(
8978 &RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
8979 WidenRecipe->getVPValue()->replaceAllUsesWith(RedRecipe);
8980 Plan->removeVPValueFor(R);
8981 Plan->addVPValue(R, RedRecipe);
8982 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
8984 WidenRecipe->eraseFromParent();
8985
8986 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
8987 VPRecipeBase *CompareRecipe =
8988 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
8989 assert(isa<VPWidenRecipe>(CompareRecipe) &&
8990 "Expected to replace a VPWidenSC");
8991 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
8992 "Expected no remaining users");
8993 CompareRecipe->eraseFromParent();
8994 }
8995 Chain = R;
8996 }
8997 }
8998 }
8999
9000 Value* LoopVectorizationPlanner::VPCallbackILV::
9001 getOrCreateVectorValues(Value *V, unsigned Part) {
9002 return ILV.getOrCreateVectorValue(V, Part);
9003 }
9004
9005 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
9006 Value *V, const VPIteration &Instance) {
9007 return ILV.getOrCreateScalarValue(V, Instance);
9008 }
9009
9010 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9011 VPSlotTracker &SlotTracker) const {
9012 O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9013 IG->getInsertPos()->printAsOperand(O, false);
9014 O << ", ";
9015 getAddr()->printAsOperand(O, SlotTracker);
9016 VPValue *Mask = getMask();
9017 if (Mask) {
9018 O << ", ";
9019 Mask->printAsOperand(O, SlotTracker);
9020 }
9021 for (unsigned i = 0; i < IG->getFactor(); ++i)
9022 if (Instruction *I = IG->getMember(i))
9023 O << "\\l\" +\n" << Indent << "\" " << VPlanIngredient(I) << " " << i;
9024 }
9025
9026 void VPWidenCallRecipe::execute(VPTransformState &State) {
9027 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9028 *this, State);
9029 }
9030
9031 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9032 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
9033 this, *this, InvariantCond, State);
9034 }
9035
9036 void VPWidenRecipe::execute(VPTransformState &State) {
9037 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
9038 }
9039
9040 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9041 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
9042 *this, State.UF, State.VF, IsPtrLoopInvariant,
9043 IsIndexLoopInvariant, State);
9044 }
9045
9046 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9047 assert(!State.Instance && "Int or FP induction being replicated.");
9048 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9049 getTruncInst(), getVPValue(0),
9050 getCastValue(), State);
9051 }
9052
9053 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9054 Value *StartV =
9055 getStartValue() ? getStartValue()->getLiveInIRValue() : nullptr;
9056 State.ILV->widenPHIInstruction(Phi, RdxDesc, StartV, State.UF, State.VF);
9057 }
9058
9059 void VPBlendRecipe::execute(VPTransformState &State) {
9060 State.ILV->setDebugLocFromInst(State.Builder, Phi);
9061 // We know that all PHIs in non-header blocks are converted into
9062 // selects, so we don't have to worry about the insertion order and we
9063 // can just use the builder.
9064 // At this point we generate the predication tree. There may be
9065 // duplications since this is a simple recursive scan, but future
9066 // optimizations will clean it up.
9067
9068 unsigned NumIncoming = getNumIncomingValues();
9069
9070 // Generate a sequence of selects of the form:
9071 // SELECT(Mask3, In3,
9072 // SELECT(Mask2, In2,
9073 // SELECT(Mask1, In1,
9074 // In0)))
9075 // Note that Mask0 is never used: lanes for which no path reaches this phi,
9076 // and which are essentially undef, are taken from In0.
9077 InnerLoopVectorizer::VectorParts Entry(State.UF);
9078 for (unsigned In = 0; In < NumIncoming; ++In) {
9079 for (unsigned Part = 0; Part < State.UF; ++Part) {
9080 // We might have single edge PHIs (blocks) - use an identity
9081 // 'select' for the first PHI operand.
9082 Value *In0 = State.get(getIncomingValue(In), Part);
9083 if (In == 0)
9084 Entry[Part] = In0; // Initialize with the first incoming value.
9085 else {
9086 // Select between the current value and the previous incoming edge
9087 // based on the incoming mask.
9088 Value *Cond = State.get(getMask(In), Part);
9089 Entry[Part] =
9090 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9091 }
9092 }
9093 }
9094 for (unsigned Part = 0; Part < State.UF; ++Part)
9095 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
9096 }
9097
9098 void VPInterleaveRecipe::execute(VPTransformState &State) {
9099 assert(!State.Instance && "Interleave group being replicated.");
9100 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9101 getStoredValues(), getMask());
9102 }
9103
9104 void VPReductionRecipe::execute(VPTransformState &State) {
9105 assert(!State.Instance && "Reduction being replicated.");
9106 for (unsigned Part = 0; Part < State.UF; ++Part) {
9107 RecurKind Kind = RdxDesc->getRecurrenceKind();
9108 Value *NewVecOp = State.get(getVecOp(), Part);
9109 if (VPValue *Cond = getCondOp()) {
9110 Value *NewCond = State.get(Cond, Part);
9111 VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9112 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
9113 Kind, VecTy->getElementType());
9114 Constant *IdenVec =
9115 ConstantVector::getSplat(VecTy->getElementCount(), Iden);
9116 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9117 NewVecOp = Select;
9118 }
9119 Value *NewRed =
9120 createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9121 Value *PrevInChain = State.get(getChainOp(), Part);
9122 Value *NextInChain;
9123 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9124 NextInChain =
9125 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9126 NewRed, PrevInChain);
9127 } else {
9128 NextInChain = State.Builder.CreateBinOp(
9129 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed,
9130 PrevInChain);
9131 }
9132 State.set(this, getUnderlyingInstr(), NextInChain, Part);
9133 }
9134 }
9135
9136 void VPReplicateRecipe::execute(VPTransformState &State) {
9137 if (State.Instance) { // Generate a single instance.
9138 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9139 State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this,
9140 *State.Instance, IsPredicated, State);
9141 // Insert scalar instance packing it into a vector.
9142 if (AlsoPack && State.VF.isVector()) {
9143 // If we're constructing lane 0, initialize to start from poison.
9144 if (State.Instance->Lane == 0) {
9145 assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9146 Value *Poison = PoisonValue::get(
9147 VectorType::get(getUnderlyingValue()->getType(), State.VF));
9148 State.ValueMap.setVectorValue(getUnderlyingInstr(),
9149 State.Instance->Part, Poison);
9150 }
9151 State.ILV->packScalarIntoVectorValue(getUnderlyingInstr(),
9152 *State.Instance);
9153 }
9154 return;
9155 }
9156
9157 // Generate scalar instances for all VF lanes of all UF parts, unless the
9158 // instruction is uniform, in which case generate only the first lane for each
9159 // of the UF parts.
9160 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9161 assert((!State.VF.isScalable() || IsUniform) &&
9162 "Can't scalarize a scalable vector");
9163 for (unsigned Part = 0; Part < State.UF; ++Part)
9164 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9165 State.ILV->scalarizeInstruction(getUnderlyingInstr(), *this,
9166 VPIteration(Part, Lane), IsPredicated,
9167 State);
9168 }
9169
9170 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9171 assert(State.Instance && "Branch on Mask works only on single instance.");
9172
9173 unsigned Part = State.Instance->Part;
9174 unsigned Lane = State.Instance->Lane;
9175
9176 Value *ConditionBit = nullptr;
9177 VPValue *BlockInMask = getMask();
9178 if (BlockInMask) {
9179 ConditionBit = State.get(BlockInMask, Part);
9180 if (ConditionBit->getType()->isVectorTy())
9181 ConditionBit = State.Builder.CreateExtractElement(
9182 ConditionBit, State.Builder.getInt32(Lane));
9183 } else // Block in mask is all-one.
9184 ConditionBit = State.Builder.getTrue();
9185
9186 // Replace the temporary unreachable terminator with a new conditional branch,
9187 // whose two destinations will be set later when they are created.
9188 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9189 assert(isa<UnreachableInst>(CurrentTerminator) &&
9190 "Expected to replace unreachable terminator with conditional branch.");
9191 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9192 CondBr->setSuccessor(0, nullptr);
9193 ReplaceInstWithInst(CurrentTerminator, CondBr);
9194 }
9195
9196 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9197 assert(State.Instance && "Predicated instruction PHI works per instance.");
9198 Instruction *ScalarPredInst =
9199 cast<Instruction>(State.get(getOperand(0), *State.Instance));
9200 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9201 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9202 assert(PredicatingBB && "Predicated block has no single predecessor.");
9203
9204 // By current pack/unpack logic we need to generate only a single phi node: if
9205 // a vector value for the predicated instruction exists at this point it means
9206 // the instruction has vector users only, and a phi for the vector value is
9207 // needed. In this case the recipe of the predicated instruction is marked to
9208 // also do that packing, thereby "hoisting" the insert-element sequence.
9209 // Otherwise, a phi node for the scalar value is needed.
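// Illustrative IR for the two cases (names made up), for a predicated %d:
//   vector value present:
//     %vphi = phi <4 x i32> [ %vec.before, %predicating ],
//                           [ %vec.with.d, %predicated ]
//   scalar value only:
//     %sphi = phi i32 [ poison, %predicating ], [ %d, %predicated ]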
9210 unsigned Part = State.Instance->Part; 9211 Instruction *PredInst = 9212 cast<Instruction>(getOperand(0)->getUnderlyingValue()); 9213 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 9214 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 9215 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9216 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9217 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9218 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9219 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 9220 } else { 9221 Type *PredInstType = PredInst->getType(); 9222 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9223 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), PredicatingBB); 9224 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9225 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 9226 } 9227 } 9228 9229 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9230 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9231 State.ILV->vectorizeMemoryInstruction(&Ingredient, State, 9232 StoredValue ? nullptr : getVPValue(), 9233 getAddr(), StoredValue, getMask()); 9234 } 9235 9236 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9237 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9238 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9239 // for predication. 9240 static ScalarEpilogueLowering getScalarEpilogueLowering( 9241 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9242 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9243 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9244 LoopVectorizationLegality &LVL) { 9245 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9246 // don't look at hints or options, and don't request a scalar epilogue. 9247 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9248 // LoopAccessInfo (due to code dependency and not being able to reliably get 9249 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9250 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9251 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9252 // back to the old way and vectorize with versioning when forced. See D81345.) 
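// For example, under -Oz (hasOptSize) this function returns
// CM_ScalarEpilogueNotAllowedOptSize regardless of any predication options
// or hints, since check 1) short-circuits checks 2) through 4).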
9253 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
9254 PGSOQueryType::IRPass) &&
9255 Hints.getForce() != LoopVectorizeHints::FK_Enabled))
9256 return CM_ScalarEpilogueNotAllowedOptSize;
9257
9258 // 2) If set, obey the directives
9259 if (PreferPredicateOverEpilogue.getNumOccurrences()) {
9260 switch (PreferPredicateOverEpilogue) {
9261 case PreferPredicateTy::ScalarEpilogue:
9262 return CM_ScalarEpilogueAllowed;
9263 case PreferPredicateTy::PredicateElseScalarEpilogue:
9264 return CM_ScalarEpilogueNotNeededUsePredicate;
9265 case PreferPredicateTy::PredicateOrDontVectorize:
9266 return CM_ScalarEpilogueNotAllowedUsePredicate;
9267 }
9268 }
9269
9270 // 3) If set, obey the hints
9271 switch (Hints.getPredicate()) {
9272 case LoopVectorizeHints::FK_Enabled:
9273 return CM_ScalarEpilogueNotNeededUsePredicate;
9274 case LoopVectorizeHints::FK_Disabled:
9275 return CM_ScalarEpilogueAllowed;
9276 }
9277
9278 // 4) If the TTI hook indicates this is profitable, request predication.
9279 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
9280 LVL.getLAI()))
9281 return CM_ScalarEpilogueNotNeededUsePredicate;
9282
9283 return CM_ScalarEpilogueAllowed;
9284 }
9285
9286 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9287 const VPIteration &Instance) {
9288 set(Def, V, Instance);
9289 ILV->setScalarValue(IRDef, Instance, V);
9290 }
9291
9292 void VPTransformState::set(VPValue *Def, Value *IRDef, Value *V,
9293 unsigned Part) {
9294 set(Def, V, Part);
9295 ILV->setVectorValue(IRDef, Part, V);
9296 }
9297
9298 void VPTransformState::reset(VPValue *Def, Value *IRDef, Value *V,
9299 unsigned Part) {
9300 set(Def, V, Part);
9301 ILV->resetVectorValue(IRDef, Part, V);
9302 }
9303
9304 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
9305 // If Values have been set for this Def, return the one relevant for \p Part.
9306 if (hasVectorValue(Def, Part))
9307 return Data.PerPartOutput[Def][Part];
9308
9309 // TODO: Remove the callback once all scalar recipes are managed using
9310 // VPValues.
9311 if (!hasScalarValue(Def, {Part, 0}))
9312 return Callback.getOrCreateVectorValues(VPValue2Value[Def], Part);
9313
9314 Value *ScalarValue = get(Def, {Part, 0});
9315 // If we aren't vectorizing, we can just copy the scalar map values over
9316 // to the vector map.
9317 if (VF.isScalar()) {
9318 set(Def, ScalarValue, Part);
9319 return ScalarValue;
9320 }
9321
9322 auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
9323 bool IsUniform = RepR && RepR->isUniform();
9324
9325 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
9326 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
9327
9328 // Set the insert point after the last scalarized instruction. This
9329 // ensures the insertelement sequence will directly follow the scalar
9330 // definitions.
9331 auto OldIP = Builder.saveIP();
9332 auto NewIP = std::next(BasicBlock::iterator(LastInst));
9333 Builder.SetInsertPoint(&*NewIP);
9334
9335 // However, if we are vectorizing, we need to construct the vector values.
9336 // If the value is known to be uniform after vectorization, we can just
9337 // broadcast the scalar value corresponding to lane zero for each unroll
9338 // iteration. Otherwise, we construct the vector values using
9339 // insertelement instructions. Since the resulting vectors are stored in
9340 // VectorLoopValueMap, we will only generate the insertelements once.
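// A sketch of the two shapes this produces (VF=4, illustrative names):
//   uniform:     broadcast the lane-0 scalar once per part (a vector splat);
//   non-uniform: %v0 = insertelement <4 x i32> undef, i32 %s0, i32 0
//                %v1 = insertelement <4 x i32> %v0,   i32 %s1, i32 1
//                ... one insertelement per lane, generated only once.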
9341 Value *VectorValue = nullptr;
9342 if (IsUniform) {
9343 VectorValue = ILV->getBroadcastInstrs(ScalarValue);
9344 set(Def, VectorValue, Part);
9345 } else {
9346 // Initialize packing with insertelements to start from undef.
9347 assert(!VF.isScalable() && "VF is assumed to be non scalable.");
9348 Value *Undef = UndefValue::get(VectorType::get(LastInst->getType(), VF));
9349 set(Def, Undef, Part);
9350 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
9351 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
9352 VectorValue = get(Def, Part);
9353 }
9354 Builder.restoreIP(OldIP);
9355 return VectorValue;
9356 }
9357
9358 // Process the loop in the VPlan-native vectorization path. This path builds
9359 // VPlan upfront in the vectorization pipeline, which allows applying
9360 // VPlan-to-VPlan transformations from the very beginning without modifying the
9361 // input LLVM IR.
9362 static bool processLoopInVPlanNativePath(
9363 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
9364 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
9365 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
9366 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
9367 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {
9368
9369 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
9370 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
9371 return false;
9372 }
9373 assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
9374 Function *F = L->getHeader()->getParent();
9375 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
9376
9377 ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
9378 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
9379
9380 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
9381 &Hints, IAI);
9382 // Use the planner for outer loop vectorization.
9383 // TODO: CM is not used at this point inside the planner. Turn CM into an
9384 // optional argument if we don't need it in the future.
9385 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);
9386
9387 // Get user vectorization factor.
9388 ElementCount UserVF = Hints.getWidth();
9389
9390 // Plan how to best vectorize, return the best VF and its cost.
9391 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
9392
9393 // If we are stress testing VPlan builds, do not attempt to generate vector
9394 // code. Masked vector code generation support will follow soon.
9395 // Also, do not attempt to vectorize if no vector code will be produced.
9396 if (VPlanBuildStressTest || EnableVPlanPredication ||
9397 VectorizationFactor::Disabled() == VF)
9398 return false;
9399
9400 LVP.setBestPlan(VF.Width, 1);
9401
9402 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
9403 &CM, BFI, PSI);
9404 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
9405 << L->getHeader()->getParent()->getName() << "\"\n");
9406 LVP.executePlan(LB, DT);
9407
9408 // Mark the loop as already vectorized to avoid vectorizing again.
9409 Hints.setAlreadyVectorized(); 9410 9411 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9412 return true; 9413 } 9414 9415 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 9416 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 9417 !EnableLoopInterleaving), 9418 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 9419 !EnableLoopVectorization) {} 9420 9421 bool LoopVectorizePass::processLoop(Loop *L) { 9422 assert((EnableVPlanNativePath || L->isInnermost()) && 9423 "VPlan-native path is not enabled. Only process inner loops."); 9424 9425 #ifndef NDEBUG 9426 const std::string DebugLocStr = getDebugLocString(L); 9427 #endif /* NDEBUG */ 9428 9429 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 9430 << L->getHeader()->getParent()->getName() << "\" from " 9431 << DebugLocStr << "\n"); 9432 9433 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 9434 9435 LLVM_DEBUG( 9436 dbgs() << "LV: Loop hints:" 9437 << " force=" 9438 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 9439 ? "disabled" 9440 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 9441 ? "enabled" 9442 : "?")) 9443 << " width=" << Hints.getWidth() 9444 << " unroll=" << Hints.getInterleave() << "\n"); 9445 9446 // Function containing loop 9447 Function *F = L->getHeader()->getParent(); 9448 9449 // Looking at the diagnostic output is the only way to determine if a loop 9450 // was vectorized (other than looking at the IR or machine code), so it 9451 // is important to generate an optimization remark for each loop. Most of 9452 // these messages are generated as OptimizationRemarkAnalysis. Remarks 9453 // generated as OptimizationRemark and OptimizationRemarkMissed are 9454 // less verbose reporting vectorized loops and unvectorized loops that may 9455 // benefit from vectorization, respectively. 9456 9457 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 9458 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 9459 return false; 9460 } 9461 9462 PredicatedScalarEvolution PSE(*SE, *L); 9463 9464 // Check if it is legal to vectorize the loop. 9465 LoopVectorizationRequirements Requirements(*ORE); 9466 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 9467 &Requirements, &Hints, DB, AC, BFI, PSI); 9468 if (!LVL.canVectorize(EnableVPlanNativePath)) { 9469 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 9470 Hints.emitRemarkWithHints(); 9471 return false; 9472 } 9473 9474 // Check the function attributes and profiles to find out if this function 9475 // should be optimized for size. 9476 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9477 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 9478 9479 // Entrance to the VPlan-native vectorization path. Outer loops are processed 9480 // here. They may require CFG and instruction level transformations before 9481 // even evaluating whether vectorization is profitable. Since we cannot modify 9482 // the incoming IR, we need to build VPlan upfront in the vectorization 9483 // pipeline. 9484 if (!L->isInnermost()) 9485 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 9486 ORE, BFI, PSI, Hints); 9487 9488 assert(L->isInnermost() && "Inner loop expected."); 9489 9490 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 9491 // count by optimizing for size, to minimize overheads. 
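// For example, a loop with a known trip count of, say, 3 only pays off if it
// can be vectorized without runtime-check or scalar-epilogue overhead, which
// is what the SEL adjustment below requests unless the user explicitly
// forced vectorization.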
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right: what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);

  // Get the user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize; return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
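  // Four outcomes are possible from this point: vectorize and interleave,
  // vectorize only, interleave only, or neither; the VectorizeLoop and
  // InterleaveLoop flags below track which combination was chosen.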
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is beneficial but is explicitly "
                         "disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
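    // Both decisions are reported as missed-optimization remarks; with
    // clang, these can be surfaced via -Rpass-missed=loop-vectorize.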
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
                               BFI, PSI);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.

    // Consider vectorizing the epilogue too if it's profitable.
    VectorizationFactor EpilogueVF =
        CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
    if (EpilogueVF.Width.isVector()) {

      // The first pass vectorizes the main loop and creates a scalar epilogue
      // to be vectorized by executing the plan (potentially with a different
      // factor) again shortly afterwards.
      EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC,
                                        EpilogueVF.Width.getKnownMinValue(), 1);
      EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, EPI,
                                         &LVL, &CM, BFI, PSI);

      LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF);
      LVP.executePlan(MainILV, DT);
      ++LoopsVectorized;

      simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
      formLCSSARecursively(*L, *DT, LI, SE);

      // The second pass vectorizes the epilogue and adjusts the control flow
      // edges from the first pass.
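      // (The same plan is re-executed with the epilogue factors; EPI's
      // MainLoopVF/MainLoopUF are overwritten below so the epilogue
      // vectorizer sees the epilogue factors as its current ones.)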
      LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF);
      EPI.MainLoopVF = EPI.EpilogueVF;
      EPI.MainLoopUF = EPI.EpilogueUF;
      EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                               ORE, EPI, &LVL, &CM, BFI, PSI);
      LVP.executePlan(EpilogILV, DT);
      ++LoopsEpilogueVectorized;

      if (!MainILV.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    } else {
      InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                             &LVL, &CM, BFI, PSI);
      LVP.executePlan(LB, DT);
      ++LoopsVectorized;

      // Add metadata to disable runtime unrolling of the scalar loop when
      // there are no runtime checks about strides and memory. A scalar loop
      // that is rarely used is not worth unrolling.
      if (!LB.areSafetyChecksAdded())
        DisableRuntimeUnroll = true;
    }

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt vectorization if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
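  // (collectSupportedLoops recurses through each loop nest; on the default
  // path it gathers only innermost loops, while the VPlan-native path also
  // admits outer loops that carry explicit vectorization hints.)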
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE,
                                      TLI, TTI, nullptr, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for the VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}
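// A quick way to exercise this pass in isolation (assuming a standard LLVM
// build) is through the new pass manager, e.g.:
//
//   opt -passes=loop-vectorize -S input.ll
//
// The decisions made above can be inspected with opt's
// -pass-remarks=loop-vectorize and -pass-remarks-missed=loop-vectorize.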