1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops 10 // and generates target-independent LLVM-IR. 11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs 12 // of instructions in order to estimate the profitability of vectorization. 13 // 14 // The loop vectorizer combines consecutive loop iterations into a single 15 // 'wide' iteration. After this transformation the index is incremented 16 // by the SIMD vector width, and not by one. 17 // 18 // This pass has three parts: 19 // 1. The main loop pass that drives the different parts. 20 // 2. LoopVectorizationLegality - A unit that checks for the legality 21 // of the vectorization. 22 // 3. InnerLoopVectorizer - A unit that performs the actual 23 // widening of instructions. 24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability 25 // of vectorization. It decides on the optimal vector width, which 26 // can be one, if vectorization is not profitable. 27 // 28 // There is a development effort going on to migrate loop vectorizer to the 29 // VPlan infrastructure and to introduce outer loop vectorization support (see 30 // docs/Proposal/VectorizationPlan.rst and 31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this 32 // purpose, we temporarily introduced the VPlan-native vectorization path: an 33 // alternative vectorization path that is natively implemented on top of the 34 // VPlan infrastructure. See EnableVPlanNativePath for enabling. 35 // 36 //===----------------------------------------------------------------------===// 37 // 38 // The reduction-variable vectorization is based on the paper: 39 // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. 40 // 41 // Variable uniformity checks are inspired by: 42 // Karrenberg, R. and Hack, S. Whole Function Vectorization. 43 // 44 // The interleaved access vectorization is based on the paper: 45 // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved 46 // Data for SIMD 47 // 48 // Other ideas/concepts are from: 49 // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. 50 // 51 // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of 52 // Vectorizing Compilers. 
53 // 54 //===----------------------------------------------------------------------===// 55 56 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 57 #include "LoopVectorizationPlanner.h" 58 #include "VPRecipeBuilder.h" 59 #include "VPlan.h" 60 #include "VPlanHCFGBuilder.h" 61 #include "VPlanPredicator.h" 62 #include "VPlanTransforms.h" 63 #include "llvm/ADT/APInt.h" 64 #include "llvm/ADT/ArrayRef.h" 65 #include "llvm/ADT/DenseMap.h" 66 #include "llvm/ADT/DenseMapInfo.h" 67 #include "llvm/ADT/Hashing.h" 68 #include "llvm/ADT/MapVector.h" 69 #include "llvm/ADT/None.h" 70 #include "llvm/ADT/Optional.h" 71 #include "llvm/ADT/STLExtras.h" 72 #include "llvm/ADT/SetVector.h" 73 #include "llvm/ADT/SmallPtrSet.h" 74 #include "llvm/ADT/SmallVector.h" 75 #include "llvm/ADT/Statistic.h" 76 #include "llvm/ADT/StringRef.h" 77 #include "llvm/ADT/Twine.h" 78 #include "llvm/ADT/iterator_range.h" 79 #include "llvm/Analysis/AssumptionCache.h" 80 #include "llvm/Analysis/BasicAliasAnalysis.h" 81 #include "llvm/Analysis/BlockFrequencyInfo.h" 82 #include "llvm/Analysis/CFG.h" 83 #include "llvm/Analysis/CodeMetrics.h" 84 #include "llvm/Analysis/DemandedBits.h" 85 #include "llvm/Analysis/GlobalsModRef.h" 86 #include "llvm/Analysis/LoopAccessAnalysis.h" 87 #include "llvm/Analysis/LoopAnalysisManager.h" 88 #include "llvm/Analysis/LoopInfo.h" 89 #include "llvm/Analysis/LoopIterator.h" 90 #include "llvm/Analysis/MemorySSA.h" 91 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 92 #include "llvm/Analysis/ProfileSummaryInfo.h" 93 #include "llvm/Analysis/ScalarEvolution.h" 94 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 95 #include "llvm/Analysis/TargetLibraryInfo.h" 96 #include "llvm/Analysis/TargetTransformInfo.h" 97 #include "llvm/Analysis/VectorUtils.h" 98 #include "llvm/IR/Attributes.h" 99 #include "llvm/IR/BasicBlock.h" 100 #include "llvm/IR/CFG.h" 101 #include "llvm/IR/Constant.h" 102 #include "llvm/IR/Constants.h" 103 #include "llvm/IR/DataLayout.h" 104 #include "llvm/IR/DebugInfoMetadata.h" 105 #include "llvm/IR/DebugLoc.h" 106 #include "llvm/IR/DerivedTypes.h" 107 #include "llvm/IR/DiagnosticInfo.h" 108 #include "llvm/IR/Dominators.h" 109 #include "llvm/IR/Function.h" 110 #include "llvm/IR/IRBuilder.h" 111 #include "llvm/IR/InstrTypes.h" 112 #include "llvm/IR/Instruction.h" 113 #include "llvm/IR/Instructions.h" 114 #include "llvm/IR/IntrinsicInst.h" 115 #include "llvm/IR/Intrinsics.h" 116 #include "llvm/IR/LLVMContext.h" 117 #include "llvm/IR/Metadata.h" 118 #include "llvm/IR/Module.h" 119 #include "llvm/IR/Operator.h" 120 #include "llvm/IR/Type.h" 121 #include "llvm/IR/Use.h" 122 #include "llvm/IR/User.h" 123 #include "llvm/IR/Value.h" 124 #include "llvm/IR/ValueHandle.h" 125 #include "llvm/IR/Verifier.h" 126 #include "llvm/InitializePasses.h" 127 #include "llvm/Pass.h" 128 #include "llvm/Support/Casting.h" 129 #include "llvm/Support/CommandLine.h" 130 #include "llvm/Support/Compiler.h" 131 #include "llvm/Support/Debug.h" 132 #include "llvm/Support/ErrorHandling.h" 133 #include "llvm/Support/MathExtras.h" 134 #include "llvm/Support/raw_ostream.h" 135 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 136 #include "llvm/Transforms/Utils/InjectTLIMappings.h" 137 #include "llvm/Transforms/Utils/LoopSimplify.h" 138 #include "llvm/Transforms/Utils/LoopUtils.h" 139 #include "llvm/Transforms/Utils/LoopVersioning.h" 140 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 141 #include "llvm/Transforms/Utils/SizeOpts.h" 142 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 143 
#include <algorithm> 144 #include <cassert> 145 #include <cstdint> 146 #include <cstdlib> 147 #include <functional> 148 #include <iterator> 149 #include <limits> 150 #include <memory> 151 #include <string> 152 #include <tuple> 153 #include <utility> 154 155 using namespace llvm; 156 157 #define LV_NAME "loop-vectorize" 158 #define DEBUG_TYPE LV_NAME 159 160 /// @{ 161 /// Metadata attribute names 162 static const char *const LLVMLoopVectorizeFollowupAll = 163 "llvm.loop.vectorize.followup_all"; 164 static const char *const LLVMLoopVectorizeFollowupVectorized = 165 "llvm.loop.vectorize.followup_vectorized"; 166 static const char *const LLVMLoopVectorizeFollowupEpilogue = 167 "llvm.loop.vectorize.followup_epilogue"; 168 /// @} 169 170 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 171 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 172 173 /// Loops with a known constant trip count below this number are vectorized only 174 /// if no scalar iteration overheads are incurred. 175 static cl::opt<unsigned> TinyTripCountVectorThreshold( 176 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 177 cl::desc("Loops with a constant trip count that is smaller than this " 178 "value are vectorized only if no scalar iteration overheads " 179 "are incurred.")); 180 181 // Indicates that an epilogue is undesired, predication is preferred. 182 // This means that the vectorizer will try to fold the loop-tail (epilogue) 183 // into the loop and predicate the loop body accordingly. 184 static cl::opt<bool> PreferPredicateOverEpilog( 185 "prefer-predicate-over-epilog", cl::init(false), cl::Hidden, 186 cl::desc("Indicate that an epilogue is undesired, predication should be " 187 "used instead.")); 188 189 static cl::opt<bool> MaximizeBandwidth( 190 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 191 cl::desc("Maximize bandwidth when selecting vectorization factor which " 192 "will be determined by the smallest type in loop.")); 193 194 static cl::opt<bool> EnableInterleavedMemAccesses( 195 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 196 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 197 198 /// An interleave-group may need masking if it resides in a block that needs 199 /// predication, or in order to mask away gaps. 
200 static cl::opt<bool> EnableMaskedInterleavedMemAccesses( 201 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, 202 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); 203 204 static cl::opt<unsigned> TinyTripCountInterleaveThreshold( 205 "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden, 206 cl::desc("We don't interleave loops with a estimated constant trip count " 207 "below this number")); 208 209 static cl::opt<unsigned> ForceTargetNumScalarRegs( 210 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 211 cl::desc("A flag that overrides the target's number of scalar registers.")); 212 213 static cl::opt<unsigned> ForceTargetNumVectorRegs( 214 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 215 cl::desc("A flag that overrides the target's number of vector registers.")); 216 217 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 218 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 219 cl::desc("A flag that overrides the target's max interleave factor for " 220 "scalar loops.")); 221 222 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 223 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 224 cl::desc("A flag that overrides the target's max interleave factor for " 225 "vectorized loops.")); 226 227 static cl::opt<unsigned> ForceTargetInstructionCost( 228 "force-target-instruction-cost", cl::init(0), cl::Hidden, 229 cl::desc("A flag that overrides the target's expected cost for " 230 "an instruction to a single constant value. Mostly " 231 "useful for getting consistent testing.")); 232 233 static cl::opt<unsigned> SmallLoopCost( 234 "small-loop-cost", cl::init(20), cl::Hidden, 235 cl::desc( 236 "The cost of a loop that is considered 'small' by the interleaver.")); 237 238 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 239 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 240 cl::desc("Enable the use of the block frequency analysis to access PGO " 241 "heuristics minimizing code growth in cold regions and being more " 242 "aggressive in hot regions.")); 243 244 // Runtime interleave loops for load/store throughput. 245 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 246 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 247 cl::desc( 248 "Enable runtime interleaving until load/store ports are saturated")); 249 250 /// The number of stores in a loop that are allowed to need predication. 
251 static cl::opt<unsigned> NumberOfStoresToPredicate( 252 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 253 cl::desc("Max number of stores to be predicated behind an if.")); 254 255 static cl::opt<bool> EnableIndVarRegisterHeur( 256 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 257 cl::desc("Count the induction variable only once when interleaving")); 258 259 static cl::opt<bool> EnableCondStoresVectorization( 260 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 261 cl::desc("Enable if predication of stores during vectorization.")); 262 263 static cl::opt<unsigned> MaxNestedScalarReductionIC( 264 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 265 cl::desc("The maximum interleave count to use when interleaving a scalar " 266 "reduction in a nested loop.")); 267 268 static cl::opt<bool> 269 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 270 cl::Hidden, 271 cl::desc("Prefer in-loop vector reductions, " 272 "overriding the targets preference.")); 273 274 static cl::opt<bool> PreferPredicatedReductionSelect( 275 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 276 cl::desc( 277 "Prefer predicating a reduction operation over an after loop select.")); 278 279 cl::opt<bool> EnableVPlanNativePath( 280 "enable-vplan-native-path", cl::init(false), cl::Hidden, 281 cl::desc("Enable VPlan-native vectorization path with " 282 "support for outer loop vectorization.")); 283 284 // FIXME: Remove this switch once we have divergence analysis. Currently we 285 // assume divergent non-backedge branches when this switch is true. 286 cl::opt<bool> EnableVPlanPredication( 287 "enable-vplan-predication", cl::init(false), cl::Hidden, 288 cl::desc("Enable VPlan-native vectorization path predicator with " 289 "support for outer loop vectorization.")); 290 291 // This flag enables the stress testing of the VPlan H-CFG construction in the 292 // VPlan-native vectorization path. It must be used in conjuction with 293 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the 294 // verification of the H-CFGs built. 295 static cl::opt<bool> VPlanBuildStressTest( 296 "vplan-build-stress-test", cl::init(false), cl::Hidden, 297 cl::desc( 298 "Build VPlan for every supported loop nest in the function and bail " 299 "out right after the build (stress test the VPlan H-CFG construction " 300 "in the VPlan-native vectorization path).")); 301 302 cl::opt<bool> llvm::EnableLoopInterleaving( 303 "interleave-loops", cl::init(true), cl::Hidden, 304 cl::desc("Enable loop interleaving in Loop vectorization passes")); 305 cl::opt<bool> llvm::EnableLoopVectorization( 306 "vectorize-loops", cl::init(true), cl::Hidden, 307 cl::desc("Run the Loop vectorization passes")); 308 309 /// A helper function that returns the type of loaded or stored value. 310 static Type *getMemInstValueType(Value *I) { 311 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 312 "Expected Load or Store instruction"); 313 if (auto *LI = dyn_cast<LoadInst>(I)) 314 return LI->getType(); 315 return cast<StoreInst>(I)->getValueOperand()->getType(); 316 } 317 318 /// A helper function that returns true if the given type is irregular. The 319 /// type is irregular if its allocated size doesn't equal the store size of an 320 /// element of the corresponding vector type at the given vectorization factor. 
321 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) { 322 // Determine if an array of VF elements of type Ty is "bitcast compatible" 323 // with a <VF x Ty> vector. 324 if (VF > 1) { 325 auto *VectorTy = FixedVectorType::get(Ty, VF); 326 return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy); 327 } 328 329 // If the vectorization factor is one, we just check if an array of type Ty 330 // requires padding between elements. 331 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); 332 } 333 334 /// A helper function that returns the reciprocal of the block probability of 335 /// predicated blocks. If we return X, we are assuming the predicated block 336 /// will execute once for every X iterations of the loop header. 337 /// 338 /// TODO: We should use actual block probability here, if available. Currently, 339 /// we always assume predicated blocks have a 50% chance of executing. 340 static unsigned getReciprocalPredBlockProb() { return 2; } 341 342 /// A helper function that adds a 'fast' flag to floating-point operations. 343 static Value *addFastMathFlag(Value *V) { 344 if (isa<FPMathOperator>(V)) 345 cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast()); 346 return V; 347 } 348 349 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) { 350 if (isa<FPMathOperator>(V)) 351 cast<Instruction>(V)->setFastMathFlags(FMF); 352 return V; 353 } 354 355 /// A helper function that returns an integer or floating-point constant with 356 /// value C. 357 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { 358 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) 359 : ConstantFP::get(Ty, C); 360 } 361 362 /// Returns "best known" trip count for the specified loop \p L as defined by 363 /// the following procedure: 364 /// 1) Returns exact trip count if it is known. 365 /// 2) Returns expected trip count according to profile data if any. 366 /// 3) Returns upper bound estimate if it is known. 367 /// 4) Returns None if all of the above failed. 368 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) { 369 // Check if exact trip count is known. 370 if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L)) 371 return ExpectedTC; 372 373 // Check if there is an expected trip count available from profile data. 374 if (LoopVectorizeWithBlockFrequency) 375 if (auto EstimatedTC = getLoopEstimatedTripCount(L)) 376 return EstimatedTC; 377 378 // Check if upper bound estimate is known. 379 if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L)) 380 return ExpectedTC; 381 382 return None; 383 } 384 385 namespace llvm { 386 387 /// InnerLoopVectorizer vectorizes loops which contain only one basic 388 /// block to a specified vectorization factor (VF). 389 /// This class performs the widening of scalars into vectors, or multiple 390 /// scalars. This class also implements the following features: 391 /// * It inserts an epilogue loop for handling loops that don't have iteration 392 /// counts that are known to be a multiple of the vectorization factor. 393 /// * It handles the code generation for reduction variables. 394 /// * Scalarization (implementation using scalars) of un-vectorizable 395 /// instructions. 396 /// InnerLoopVectorizer does not perform any vectorization-legality 397 /// checks, and relies on the caller to check for the different legality 398 /// aspects. 
The InnerLoopVectorizer relies on the 399 /// LoopVectorizationLegality class to provide information about the induction 400 /// and reduction variables that were found to a given vectorization factor. 401 class InnerLoopVectorizer { 402 public: 403 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 404 LoopInfo *LI, DominatorTree *DT, 405 const TargetLibraryInfo *TLI, 406 const TargetTransformInfo *TTI, AssumptionCache *AC, 407 OptimizationRemarkEmitter *ORE, unsigned VecWidth, 408 unsigned UnrollFactor, LoopVectorizationLegality *LVL, 409 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, 410 ProfileSummaryInfo *PSI) 411 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), 412 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), 413 Builder(PSE.getSE()->getContext()), 414 VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM), 415 BFI(BFI), PSI(PSI) { 416 // Query this against the original loop and save it here because the profile 417 // of the original loop header may change as the transformation happens. 418 OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize( 419 OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass); 420 } 421 422 virtual ~InnerLoopVectorizer() = default; 423 424 /// Create a new empty loop that will contain vectorized instructions later 425 /// on, while the old loop will be used as the scalar remainder. Control flow 426 /// is generated around the vectorized (and scalar epilogue) loops consisting 427 /// of various checks and bypasses. Return the pre-header block of the new 428 /// loop. 429 BasicBlock *createVectorizedLoopSkeleton(); 430 431 /// Widen a single instruction within the innermost loop. 432 void widenInstruction(Instruction &I, VPUser &Operands, 433 VPTransformState &State); 434 435 /// Widen a single call instruction within the innermost loop. 436 void widenCallInstruction(CallInst &I, VPUser &ArgOperands, 437 VPTransformState &State); 438 439 /// Widen a single select instruction within the innermost loop. 440 void widenSelectInstruction(SelectInst &I, VPUser &Operands, 441 bool InvariantCond, VPTransformState &State); 442 443 /// Fix the vectorized code, taking care of header phi's, live-outs, and more. 444 void fixVectorizedLoop(); 445 446 // Return true if any runtime check is added. 447 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 448 449 /// A type for vectorized values in the new loop. Each value from the 450 /// original loop, when vectorized, is represented by UF vector values in the 451 /// new unrolled loop, where UF is the unroll factor. 452 using VectorParts = SmallVector<Value *, 2>; 453 454 /// Vectorize a single GetElementPtrInst based on information gathered and 455 /// decisions taken during planning. 456 void widenGEP(GetElementPtrInst *GEP, VPUser &Indices, unsigned UF, 457 unsigned VF, bool IsPtrLoopInvariant, 458 SmallBitVector &IsIndexLoopInvariant, VPTransformState &State); 459 460 /// Vectorize a single PHINode in a block. This method handles the induction 461 /// variable canonicalization. It supports both VF = 1 for unrolled loops and 462 /// arbitrary length vectors. 463 void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF); 464 465 /// A helper function to scalarize a single Instruction in the innermost loop. 466 /// Generates a sequence of scalar instances for each lane between \p MinLane 467 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, 468 /// inclusive. 
Uses the VPValue operands from \p Operands instead of \p 469 /// Instr's operands. 470 void scalarizeInstruction(Instruction *Instr, VPUser &Operands, 471 const VPIteration &Instance, bool IfPredicateInstr, 472 VPTransformState &State); 473 474 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc 475 /// is provided, the integer induction variable will first be truncated to 476 /// the corresponding type. 477 void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr); 478 479 /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a 480 /// vector or scalar value on-demand if one is not yet available. When 481 /// vectorizing a loop, we visit the definition of an instruction before its 482 /// uses. When visiting the definition, we either vectorize or scalarize the 483 /// instruction, creating an entry for it in the corresponding map. (In some 484 /// cases, such as induction variables, we will create both vector and scalar 485 /// entries.) Then, as we encounter uses of the definition, we derive values 486 /// for each scalar or vector use unless such a value is already available. 487 /// For example, if we scalarize a definition and one of its uses is vector, 488 /// we build the required vector on-demand with an insertelement sequence 489 /// when visiting the use. Otherwise, if the use is scalar, we can use the 490 /// existing scalar definition. 491 /// 492 /// Return a value in the new loop corresponding to \p V from the original 493 /// loop at unroll index \p Part. If the value has already been vectorized, 494 /// the corresponding vector entry in VectorLoopValueMap is returned. If, 495 /// however, the value has a scalar entry in VectorLoopValueMap, we construct 496 /// a new vector value on-demand by inserting the scalar values into a vector 497 /// with an insertelement sequence. If the value has been neither vectorized 498 /// nor scalarized, it must be loop invariant, so we simply broadcast the 499 /// value into a vector. 500 Value *getOrCreateVectorValue(Value *V, unsigned Part); 501 502 /// Return a value in the new loop corresponding to \p V from the original 503 /// loop at unroll and vector indices \p Instance. If the value has been 504 /// vectorized but not scalarized, the necessary extractelement instruction 505 /// will be generated. 506 Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance); 507 508 /// Construct the vector value of a scalarized value \p V one lane at a time. 509 void packScalarIntoVectorValue(Value *V, const VPIteration &Instance); 510 511 /// Try to vectorize interleaved access group \p Group with the base address 512 /// given in \p Addr, optionally masking the vector operations if \p 513 /// BlockInMask is non-null. Use \p State to translate given VPValues to IR 514 /// values in the vectorized loop. 515 void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group, 516 VPTransformState &State, VPValue *Addr, 517 VPValue *BlockInMask = nullptr); 518 519 /// Vectorize Load and Store instructions with the base address given in \p 520 /// Addr, optionally masking the vector operations if \p BlockInMask is 521 /// non-null. Use \p State to translate given VPValues to IR values in the 522 /// vectorized loop. 523 void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State, 524 VPValue *Addr, VPValue *StoredValue, 525 VPValue *BlockInMask); 526 527 /// Set the debug location in the builder using the debug location in 528 /// the instruction. 
529 void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr); 530 531 /// Fix the non-induction PHIs in the OrigPHIsToFix vector. 532 void fixNonInductionPHIs(void); 533 534 protected: 535 friend class LoopVectorizationPlanner; 536 537 /// A small list of PHINodes. 538 using PhiVector = SmallVector<PHINode *, 4>; 539 540 /// A type for scalarized values in the new loop. Each value from the 541 /// original loop, when scalarized, is represented by UF x VF scalar values 542 /// in the new unrolled loop, where UF is the unroll factor and VF is the 543 /// vectorization factor. 544 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 545 546 /// Set up the values of the IVs correctly when exiting the vector loop. 547 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 548 Value *CountRoundDown, Value *EndValue, 549 BasicBlock *MiddleBlock); 550 551 /// Create a new induction variable inside L. 552 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 553 Value *Step, Instruction *DL); 554 555 /// Handle all cross-iteration phis in the header. 556 void fixCrossIterationPHIs(); 557 558 /// Fix a first-order recurrence. This is the second phase of vectorizing 559 /// this phi node. 560 void fixFirstOrderRecurrence(PHINode *Phi); 561 562 /// Fix a reduction cross-iteration phi. This is the second phase of 563 /// vectorizing this phi node. 564 void fixReduction(PHINode *Phi); 565 566 /// Clear NSW/NUW flags from reduction instructions if necessary. 567 void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc); 568 569 /// The Loop exit block may have single value PHI nodes with some 570 /// incoming value. While vectorizing we only handled real values 571 /// that were defined inside the loop and we should have one value for 572 /// each predecessor of its parent basic block. See PR14725. 573 void fixLCSSAPHIs(); 574 575 /// Iteratively sink the scalarized operands of a predicated instruction into 576 /// the block that was created for it. 577 void sinkScalarOperands(Instruction *PredInst); 578 579 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 580 /// represented as. 581 void truncateToMinimalBitwidths(); 582 583 /// Create a broadcast instruction. This method generates a broadcast 584 /// instruction (shuffle) for loop invariant values and for the induction 585 /// value. If this is the induction variable then we extend it to N, N+1, ... 586 /// this is needed because each iteration in the loop corresponds to a SIMD 587 /// element. 588 virtual Value *getBroadcastInstrs(Value *V); 589 590 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...) 591 /// to each vector element of Val. The sequence starts at StartIndex. 592 /// \p Opcode is relevant for FP induction variable. 593 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, 594 Instruction::BinaryOps Opcode = 595 Instruction::BinaryOpsEnd); 596 597 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 598 /// variable on which to base the steps, \p Step is the size of the step, and 599 /// \p EntryVal is the value from the original loop that maps to the steps. 600 /// Note that \p EntryVal doesn't have to be an induction variable - it 601 /// can also be a truncate instruction. 602 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, 603 const InductionDescriptor &ID); 604 605 /// Create a vector induction phi node based on an existing scalar one. 
\p 606 /// EntryVal is the value from the original loop that maps to the vector phi 607 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a 608 /// truncate instruction, instead of widening the original IV, we widen a 609 /// version of the IV truncated to \p EntryVal's type. 610 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, 611 Value *Step, Instruction *EntryVal); 612 613 /// Returns true if an instruction \p I should be scalarized instead of 614 /// vectorized for the chosen vectorization factor. 615 bool shouldScalarizeInstruction(Instruction *I) const; 616 617 /// Returns true if we should generate a scalar version of \p IV. 618 bool needsScalarInduction(Instruction *IV) const; 619 620 /// If there is a cast involved in the induction variable \p ID, which should 621 /// be ignored in the vectorized loop body, this function records the 622 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the 623 /// cast. We had already proved that the casted Phi is equal to the uncasted 624 /// Phi in the vectorized loop (under a runtime guard), and therefore 625 /// there is no need to vectorize the cast - the same value can be used in the 626 /// vector loop for both the Phi and the cast. 627 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified, 628 /// Otherwise, \p VectorLoopValue is a widened/vectorized value. 629 /// 630 /// \p EntryVal is the value from the original loop that maps to the vector 631 /// phi node and is used to distinguish what is the IV currently being 632 /// processed - original one (if \p EntryVal is a phi corresponding to the 633 /// original IV) or the "newly-created" one based on the proof mentioned above 634 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 635 /// latter case \p EntryVal is a TruncInst and we must not record anything for 636 /// that IV, but it's error-prone to expect callers of this routine to care 637 /// about that, hence this explicit parameter. 638 void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID, 639 const Instruction *EntryVal, 640 Value *VectorLoopValue, 641 unsigned Part, 642 unsigned Lane = UINT_MAX); 643 644 /// Generate a shuffle sequence that will reverse the vector Vec. 645 virtual Value *reverseVector(Value *Vec); 646 647 /// Returns (and creates if needed) the original loop trip count. 648 Value *getOrCreateTripCount(Loop *NewLoop); 649 650 /// Returns (and creates if needed) the trip count of the widened loop. 651 Value *getOrCreateVectorTripCount(Loop *NewLoop); 652 653 /// Returns a bitcasted value to the requested vector type. 654 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 655 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 656 const DataLayout &DL); 657 658 /// Emit a bypass check to see if the vector trip count is zero, including if 659 /// it overflows. 660 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 661 662 /// Emit a bypass check to see if all of the SCEV assumptions we've 663 /// had to make are correct. 664 void emitSCEVChecks(Loop *L, BasicBlock *Bypass); 665 666 /// Emit bypass checks to check any memory assumptions we may have made. 667 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 668 669 /// Compute the transformed value of Index at offset StartValue using step 670 /// StepValue. 671 /// For integer induction, returns StartValue + Index * StepValue. 
672 /// For pointer induction, returns StartValue[Index * StepValue]. 673 /// FIXME: The newly created binary instructions should contain nsw/nuw 674 /// flags, which can be found from the original scalar operations. 675 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, 676 const DataLayout &DL, 677 const InductionDescriptor &ID) const; 678 679 /// Emit basic blocks (prefixed with \p Prefix) for the iteration check, 680 /// vector loop preheader, middle block and scalar preheader. Also 681 /// allocate a loop object for the new vector loop and return it. 682 Loop *createVectorLoopSkeleton(StringRef Prefix); 683 684 /// Create new phi nodes for the induction variables to resume iteration count 685 /// in the scalar epilogue, from where the vectorized loop left off (given by 686 /// \p VectorTripCount). 687 void createInductionResumeValues(Loop *L, Value *VectorTripCount); 688 689 /// Complete the loop skeleton by adding debug MDs, creating appropriate 690 /// conditional branches in the middle block, preparing the builder and 691 /// running the verifier. Take in the vector loop \p L as argument, and return 692 /// the preheader of the completed vector loop. 693 BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID); 694 695 /// Add additional metadata to \p To that was not present on \p Orig. 696 /// 697 /// Currently this is used to add the noalias annotations based on the 698 /// inserted memchecks. Use this for instructions that are *cloned* into the 699 /// vector loop. 700 void addNewMetadata(Instruction *To, const Instruction *Orig); 701 702 /// Add metadata from one instruction to another. 703 /// 704 /// This includes both the original MDs from \p From and additional ones (\see 705 /// addNewMetadata). Use this for *newly created* instructions in the vector 706 /// loop. 707 void addMetadata(Instruction *To, Instruction *From); 708 709 /// Similar to the previous function but it adds the metadata to a 710 /// vector of instructions. 711 void addMetadata(ArrayRef<Value *> To, Instruction *From); 712 713 /// The original loop. 714 Loop *OrigLoop; 715 716 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies 717 /// dynamic knowledge to simplify SCEV expressions and converts them to a 718 /// more usable form. 719 PredicatedScalarEvolution &PSE; 720 721 /// Loop Info. 722 LoopInfo *LI; 723 724 /// Dominator Tree. 725 DominatorTree *DT; 726 727 /// Alias Analysis. 728 AAResults *AA; 729 730 /// Target Library Info. 731 const TargetLibraryInfo *TLI; 732 733 /// Target Transform Info. 734 const TargetTransformInfo *TTI; 735 736 /// Assumption Cache. 737 AssumptionCache *AC; 738 739 /// Interface to emit optimization remarks. 740 OptimizationRemarkEmitter *ORE; 741 742 /// LoopVersioning. It's only set up (non-null) if memchecks were 743 /// used. 744 /// 745 /// This is currently only used to add no-alias metadata based on the 746 /// memchecks. The actually versioning is performed manually. 747 std::unique_ptr<LoopVersioning> LVer; 748 749 /// The vectorization SIMD factor to use. Each vector will have this many 750 /// vector elements. 751 unsigned VF; 752 753 /// The vectorization unroll factor to use. Each scalar is vectorized to this 754 /// many different vector instructions. 755 unsigned UF; 756 757 /// The builder that we use 758 IRBuilder<> Builder; 759 760 // --- Vectorization state --- 761 762 /// The vector-loop preheader. 763 BasicBlock *LoopVectorPreHeader; 764 765 /// The scalar-loop preheader. 
766 BasicBlock *LoopScalarPreHeader; 767 768 /// Middle Block between the vector and the scalar. 769 BasicBlock *LoopMiddleBlock; 770 771 /// The ExitBlock of the scalar loop. 772 BasicBlock *LoopExitBlock; 773 774 /// The vector loop body. 775 BasicBlock *LoopVectorBody; 776 777 /// The scalar loop body. 778 BasicBlock *LoopScalarBody; 779 780 /// A list of all bypass blocks. The first block is the entry of the loop. 781 SmallVector<BasicBlock *, 4> LoopBypassBlocks; 782 783 /// The new Induction variable which was added to the new block. 784 PHINode *Induction = nullptr; 785 786 /// The induction variable of the old basic block. 787 PHINode *OldInduction = nullptr; 788 789 /// Maps values from the original loop to their corresponding values in the 790 /// vectorized loop. A key value can map to either vector values, scalar 791 /// values or both kinds of values, depending on whether the key was 792 /// vectorized and scalarized. 793 VectorizerValueMap VectorLoopValueMap; 794 795 /// Store instructions that were predicated. 796 SmallVector<Instruction *, 4> PredicatedInstructions; 797 798 /// Trip count of the original loop. 799 Value *TripCount = nullptr; 800 801 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) 802 Value *VectorTripCount = nullptr; 803 804 /// The legality analysis. 805 LoopVectorizationLegality *Legal; 806 807 /// The profitablity analysis. 808 LoopVectorizationCostModel *Cost; 809 810 // Record whether runtime checks are added. 811 bool AddedSafetyChecks = false; 812 813 // Holds the end values for each induction variable. We save the end values 814 // so we can later fix-up the external users of the induction variables. 815 DenseMap<PHINode *, Value *> IVEndValues; 816 817 // Vector of original scalar PHIs whose corresponding widened PHIs need to be 818 // fixed up at the end of vector code generation. 819 SmallVector<PHINode *, 8> OrigPHIsToFix; 820 821 /// BFI and PSI are used to check for profile guided size optimizations. 822 BlockFrequencyInfo *BFI; 823 ProfileSummaryInfo *PSI; 824 825 // Whether this loop should be optimized for size based on profile guided size 826 // optimizatios. 827 bool OptForSizeBasedOnProfile; 828 }; 829 830 class InnerLoopUnroller : public InnerLoopVectorizer { 831 public: 832 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 833 LoopInfo *LI, DominatorTree *DT, 834 const TargetLibraryInfo *TLI, 835 const TargetTransformInfo *TTI, AssumptionCache *AC, 836 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, 837 LoopVectorizationLegality *LVL, 838 LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI, 839 ProfileSummaryInfo *PSI) 840 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1, 841 UnrollFactor, LVL, CM, BFI, PSI) {} 842 843 private: 844 Value *getBroadcastInstrs(Value *V) override; 845 Value *getStepVector(Value *Val, int StartIdx, Value *Step, 846 Instruction::BinaryOps Opcode = 847 Instruction::BinaryOpsEnd) override; 848 Value *reverseVector(Value *Vec) override; 849 }; 850 851 } // end namespace llvm 852 853 /// Look for a meaningful debug location on the instruction or it's 854 /// operands. 
855 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { 856 if (!I) 857 return I; 858 859 DebugLoc Empty; 860 if (I->getDebugLoc() != Empty) 861 return I; 862 863 for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) { 864 if (Instruction *OpInst = dyn_cast<Instruction>(*OI)) 865 if (OpInst->getDebugLoc() != Empty) 866 return OpInst; 867 } 868 869 return I; 870 } 871 872 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) { 873 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) { 874 const DILocation *DIL = Inst->getDebugLoc(); 875 if (DIL && Inst->getFunction()->isDebugInfoForProfiling() && 876 !isa<DbgInfoIntrinsic>(Inst)) { 877 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF); 878 if (NewDIL) 879 B.SetCurrentDebugLocation(NewDIL.getValue()); 880 else 881 LLVM_DEBUG(dbgs() 882 << "Failed to create new discriminator: " 883 << DIL->getFilename() << " Line: " << DIL->getLine()); 884 } 885 else 886 B.SetCurrentDebugLocation(DIL); 887 } else 888 B.SetCurrentDebugLocation(DebugLoc()); 889 } 890 891 /// Write a record \p DebugMsg about vectorization failure to the debug 892 /// output stream. If \p I is passed, it is an instruction that prevents 893 /// vectorization. 894 #ifndef NDEBUG 895 static void debugVectorizationFailure(const StringRef DebugMsg, 896 Instruction *I) { 897 dbgs() << "LV: Not vectorizing: " << DebugMsg; 898 if (I != nullptr) 899 dbgs() << " " << *I; 900 else 901 dbgs() << '.'; 902 dbgs() << '\n'; 903 } 904 #endif 905 906 /// Create an analysis remark that explains why vectorization failed 907 /// 908 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p 909 /// RemarkName is the identifier for the remark. If \p I is passed it is an 910 /// instruction that prevents vectorization. Otherwise \p TheLoop is used for 911 /// the location of the remark. \return the remark object that can be 912 /// streamed to. 913 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName, 914 StringRef RemarkName, Loop *TheLoop, Instruction *I) { 915 Value *CodeRegion = TheLoop->getHeader(); 916 DebugLoc DL = TheLoop->getStartLoc(); 917 918 if (I) { 919 CodeRegion = I->getParent(); 920 // If there is no debug location attached to the instruction, revert back to 921 // using the loop's. 922 if (I->getDebugLoc()) 923 DL = I->getDebugLoc(); 924 } 925 926 OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion); 927 R << "loop not vectorized: "; 928 return R; 929 } 930 931 namespace llvm { 932 933 void reportVectorizationFailure(const StringRef DebugMsg, 934 const StringRef OREMsg, const StringRef ORETag, 935 OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) { 936 LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I)); 937 LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); 938 ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), 939 ORETag, TheLoop, I) << OREMsg); 940 } 941 942 } // end namespace llvm 943 944 #ifndef NDEBUG 945 /// \return string containing a file name and a line # for the given loop. 946 static std::string getDebugLocString(const Loop *L) { 947 std::string Result; 948 if (L) { 949 raw_string_ostream OS(Result); 950 if (const DebugLoc LoopDbgLoc = L->getStartLoc()) 951 LoopDbgLoc.print(OS); 952 else 953 // Just print the module name. 
954 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); 955 OS.flush(); 956 } 957 return Result; 958 } 959 #endif 960 961 void InnerLoopVectorizer::addNewMetadata(Instruction *To, 962 const Instruction *Orig) { 963 // If the loop was versioned with memchecks, add the corresponding no-alias 964 // metadata. 965 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) 966 LVer->annotateInstWithNoAlias(To, Orig); 967 } 968 969 void InnerLoopVectorizer::addMetadata(Instruction *To, 970 Instruction *From) { 971 propagateMetadata(To, From); 972 addNewMetadata(To, From); 973 } 974 975 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, 976 Instruction *From) { 977 for (Value *V : To) { 978 if (Instruction *I = dyn_cast<Instruction>(V)) 979 addMetadata(I, From); 980 } 981 } 982 983 namespace llvm { 984 985 // Loop vectorization cost-model hints how the scalar epilogue loop should be 986 // lowered. 987 enum ScalarEpilogueLowering { 988 989 // The default: allowing scalar epilogues. 990 CM_ScalarEpilogueAllowed, 991 992 // Vectorization with OptForSize: don't allow epilogues. 993 CM_ScalarEpilogueNotAllowedOptSize, 994 995 // A special case of vectorisation with OptForSize: loops with a very small 996 // trip count are considered for vectorization under OptForSize, thereby 997 // making sure the cost of their loop body is dominant, free of runtime 998 // guards and scalar iteration overheads. 999 CM_ScalarEpilogueNotAllowedLowTripLoop, 1000 1001 // Loop hint predicate indicating an epilogue is undesired. 1002 CM_ScalarEpilogueNotNeededUsePredicate 1003 }; 1004 1005 /// LoopVectorizationCostModel - estimates the expected speedups due to 1006 /// vectorization. 1007 /// In many cases vectorization is not profitable. This can happen because of 1008 /// a number of reasons. In this class we mainly attempt to predict the 1009 /// expected speedup/slowdowns due to the supported instruction set. We use the 1010 /// TargetTransformInfo to query the different backends for the cost of 1011 /// different operations. 1012 class LoopVectorizationCostModel { 1013 public: 1014 LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L, 1015 PredicatedScalarEvolution &PSE, LoopInfo *LI, 1016 LoopVectorizationLegality *Legal, 1017 const TargetTransformInfo &TTI, 1018 const TargetLibraryInfo *TLI, DemandedBits *DB, 1019 AssumptionCache *AC, 1020 OptimizationRemarkEmitter *ORE, const Function *F, 1021 const LoopVectorizeHints *Hints, 1022 InterleavedAccessInfo &IAI) 1023 : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), 1024 TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F), 1025 Hints(Hints), InterleaveInfo(IAI) {} 1026 1027 /// \return An upper bound for the vectorization factor, or None if 1028 /// vectorization and interleaving should be avoided up front. 1029 Optional<unsigned> computeMaxVF(unsigned UserVF, unsigned UserIC); 1030 1031 /// \return True if runtime checks are required for vectorization, and false 1032 /// otherwise. 1033 bool runtimeChecksRequired(); 1034 1035 /// \return The most profitable vectorization factor and the cost of that VF. 1036 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO 1037 /// then this vectorization factor will be selected if vectorization is 1038 /// possible. 1039 VectorizationFactor selectVectorizationFactor(unsigned MaxVF); 1040 1041 /// Setup cost-based decisions for user vectorization factor. 
1042 void selectUserVectorizationFactor(unsigned UserVF) { 1043 collectUniformsAndScalars(UserVF); 1044 collectInstsToScalarize(UserVF); 1045 } 1046 1047 /// \return The size (in bits) of the smallest and widest types in the code 1048 /// that needs to be vectorized. We ignore values that remain scalar such as 1049 /// 64 bit loop indices. 1050 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1051 1052 /// \return The desired interleave count. 1053 /// If interleave count has been specified by metadata it will be returned. 1054 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1055 /// are the selected vectorization factor and the cost of the selected VF. 1056 unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost); 1057 1058 /// Memory access instruction may be vectorized in more than one way. 1059 /// Form of instruction after vectorization depends on cost. 1060 /// This function takes cost-based decisions for Load/Store instructions 1061 /// and collects them in a map. This decisions map is used for building 1062 /// the lists of loop-uniform and loop-scalar instructions. 1063 /// The calculated cost is saved with widening decision in order to 1064 /// avoid redundant calculations. 1065 void setCostBasedWideningDecision(unsigned VF); 1066 1067 /// A struct that represents some properties of the register usage 1068 /// of a loop. 1069 struct RegisterUsage { 1070 /// Holds the number of loop invariant values that are used in the loop. 1071 /// The key is ClassID of target-provided register class. 1072 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1073 /// Holds the maximum number of concurrent live intervals in the loop. 1074 /// The key is ClassID of target-provided register class. 1075 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1076 }; 1077 1078 /// \return Returns information about the register usages of the loop for the 1079 /// given vectorization factors. 1080 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 1081 1082 /// Collect values we want to ignore in the cost model. 1083 void collectValuesToIgnore(); 1084 1085 /// Split reductions into those that happen in the loop, and those that happen 1086 /// outside. In loop reductions are collected into InLoopReductionChains. 1087 void collectInLoopReductions(); 1088 1089 /// \returns The smallest bitwidth each instruction can be represented with. 1090 /// The vector equivalents of these instructions should be truncated to this 1091 /// type. 1092 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1093 return MinBWs; 1094 } 1095 1096 /// \returns True if it is more profitable to scalarize instruction \p I for 1097 /// vectorization factor \p VF. 1098 bool isProfitableToScalarize(Instruction *I, unsigned VF) const { 1099 assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1."); 1100 1101 // Cost model is not run in the VPlan-native path - return conservative 1102 // result until this changes. 1103 if (EnableVPlanNativePath) 1104 return false; 1105 1106 auto Scalars = InstsToScalarize.find(VF); 1107 assert(Scalars != InstsToScalarize.end() && 1108 "VF not yet analyzed for scalarization profitability"); 1109 return Scalars->second.find(I) != Scalars->second.end(); 1110 } 1111 1112 /// Returns true if \p I is known to be uniform after vectorization. 
1113 bool isUniformAfterVectorization(Instruction *I, unsigned VF) const { 1114 if (VF == 1) 1115 return true; 1116 1117 // Cost model is not run in the VPlan-native path - return conservative 1118 // result until this changes. 1119 if (EnableVPlanNativePath) 1120 return false; 1121 1122 auto UniformsPerVF = Uniforms.find(VF); 1123 assert(UniformsPerVF != Uniforms.end() && 1124 "VF not yet analyzed for uniformity"); 1125 return UniformsPerVF->second.count(I); 1126 } 1127 1128 /// Returns true if \p I is known to be scalar after vectorization. 1129 bool isScalarAfterVectorization(Instruction *I, unsigned VF) const { 1130 if (VF == 1) 1131 return true; 1132 1133 // Cost model is not run in the VPlan-native path - return conservative 1134 // result until this changes. 1135 if (EnableVPlanNativePath) 1136 return false; 1137 1138 auto ScalarsPerVF = Scalars.find(VF); 1139 assert(ScalarsPerVF != Scalars.end() && 1140 "Scalar values are not calculated for VF"); 1141 return ScalarsPerVF->second.count(I); 1142 } 1143 1144 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1145 /// for vectorization factor \p VF. 1146 bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const { 1147 return VF > 1 && MinBWs.find(I) != MinBWs.end() && 1148 !isProfitableToScalarize(I, VF) && 1149 !isScalarAfterVectorization(I, VF); 1150 } 1151 1152 /// Decision that was taken during cost calculation for memory instruction. 1153 enum InstWidening { 1154 CM_Unknown, 1155 CM_Widen, // For consecutive accesses with stride +1. 1156 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1157 CM_Interleave, 1158 CM_GatherScatter, 1159 CM_Scalarize 1160 }; 1161 1162 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1163 /// instruction \p I and vector width \p VF. 1164 void setWideningDecision(Instruction *I, unsigned VF, InstWidening W, 1165 unsigned Cost) { 1166 assert(VF >= 2 && "Expected VF >=2"); 1167 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1168 } 1169 1170 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1171 /// interleaving group \p Grp and vector width \p VF. 1172 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF, 1173 InstWidening W, unsigned Cost) { 1174 assert(VF >= 2 && "Expected VF >=2"); 1175 /// Broadcast this decicion to all instructions inside the group. 1176 /// But the cost will be assigned to one instruction only. 1177 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1178 if (auto *I = Grp->getMember(i)) { 1179 if (Grp->getInsertPos() == I) 1180 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1181 else 1182 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1183 } 1184 } 1185 } 1186 1187 /// Return the cost model decision for the given instruction \p I and vector 1188 /// width \p VF. Return CM_Unknown if this instruction did not pass 1189 /// through the cost modeling. 1190 InstWidening getWideningDecision(Instruction *I, unsigned VF) { 1191 assert(VF >= 2 && "Expected VF >=2"); 1192 1193 // Cost model is not run in the VPlan-native path - return conservative 1194 // result until this changes. 
1195 if (EnableVPlanNativePath) 1196 return CM_GatherScatter; 1197 1198 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1199 auto Itr = WideningDecisions.find(InstOnVF); 1200 if (Itr == WideningDecisions.end()) 1201 return CM_Unknown; 1202 return Itr->second.first; 1203 } 1204 1205 /// Return the vectorization cost for the given instruction \p I and vector 1206 /// width \p VF. 1207 unsigned getWideningCost(Instruction *I, unsigned VF) { 1208 assert(VF >= 2 && "Expected VF >=2"); 1209 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1210 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1211 "The cost is not calculated"); 1212 return WideningDecisions[InstOnVF].second; 1213 } 1214 1215 /// Return True if instruction \p I is an optimizable truncate whose operand 1216 /// is an induction variable. Such a truncate will be removed by adding a new 1217 /// induction variable with the destination type. 1218 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) { 1219 // If the instruction is not a truncate, return false. 1220 auto *Trunc = dyn_cast<TruncInst>(I); 1221 if (!Trunc) 1222 return false; 1223 1224 // Get the source and destination types of the truncate. 1225 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1226 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1227 1228 // If the truncate is free for the given types, return false. Replacing a 1229 // free truncate with an induction variable would add an induction variable 1230 // update instruction to each iteration of the loop. We exclude from this 1231 // check the primary induction variable since it will need an update 1232 // instruction regardless. 1233 Value *Op = Trunc->getOperand(0); 1234 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1235 return false; 1236 1237 // If the truncated value is not an induction variable, return false. 1238 return Legal->isInductionPhi(Op); 1239 } 1240 1241 /// Collects the instructions to scalarize for each predicated instruction in 1242 /// the loop. 1243 void collectInstsToScalarize(unsigned VF); 1244 1245 /// Collect Uniform and Scalar values for the given \p VF. 1246 /// The sets depend on CM decision for Load/Store instructions 1247 /// that may be vectorized as interleave, gather-scatter or scalarized. 1248 void collectUniformsAndScalars(unsigned VF) { 1249 // Do the analysis once. 1250 if (VF == 1 || Uniforms.find(VF) != Uniforms.end()) 1251 return; 1252 setCostBasedWideningDecision(VF); 1253 collectLoopUniforms(VF); 1254 collectLoopScalars(VF); 1255 } 1256 1257 /// Returns true if the target machine supports masked store operation 1258 /// for the given \p DataType and kind of access to \p Ptr. 1259 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) { 1260 return Legal->isConsecutivePtr(Ptr) && 1261 TTI.isLegalMaskedStore(DataType, Alignment); 1262 } 1263 1264 /// Returns true if the target machine supports masked load operation 1265 /// for the given \p DataType and kind of access to \p Ptr. 1266 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) { 1267 return Legal->isConsecutivePtr(Ptr) && 1268 TTI.isLegalMaskedLoad(DataType, Alignment); 1269 } 1270 1271 /// Returns true if the target machine supports masked scatter operation 1272 /// for the given \p DataType. 
1273 bool isLegalMaskedScatter(Type *DataType, Align Alignment) { 1274 return TTI.isLegalMaskedScatter(DataType, Alignment); 1275 } 1276 1277 /// Returns true if the target machine supports masked gather operation 1278 /// for the given \p DataType. 1279 bool isLegalMaskedGather(Type *DataType, Align Alignment) { 1280 return TTI.isLegalMaskedGather(DataType, Alignment); 1281 } 1282 1283 /// Returns true if the target machine can represent \p V as a masked gather 1284 /// or scatter operation. 1285 bool isLegalGatherOrScatter(Value *V) { 1286 bool LI = isa<LoadInst>(V); 1287 bool SI = isa<StoreInst>(V); 1288 if (!LI && !SI) 1289 return false; 1290 auto *Ty = getMemInstValueType(V); 1291 Align Align = getLoadStoreAlignment(V); 1292 return (LI && isLegalMaskedGather(Ty, Align)) || 1293 (SI && isLegalMaskedScatter(Ty, Align)); 1294 } 1295 1296 /// Returns true if \p I is an instruction that will be scalarized with 1297 /// predication. Such instructions include conditional stores and 1298 /// instructions that may divide by zero. 1299 /// If a non-zero VF has been calculated, we check if I will be scalarized 1300 /// predication for that VF. 1301 bool isScalarWithPredication(Instruction *I, unsigned VF = 1); 1302 1303 // Returns true if \p I is an instruction that will be predicated either 1304 // through scalar predication or masked load/store or masked gather/scatter. 1305 // Superset of instructions that return true for isScalarWithPredication. 1306 bool isPredicatedInst(Instruction *I) { 1307 if (!blockNeedsPredication(I->getParent())) 1308 return false; 1309 // Loads and stores that need some form of masked operation are predicated 1310 // instructions. 1311 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1312 return Legal->isMaskRequired(I); 1313 return isScalarWithPredication(I); 1314 } 1315 1316 /// Returns true if \p I is a memory instruction with consecutive memory 1317 /// access that can be widened. 1318 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1); 1319 1320 /// Returns true if \p I is a memory instruction in an interleaved-group 1321 /// of memory accesses that can be vectorized with wide vector loads/stores 1322 /// and shuffles. 1323 bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1); 1324 1325 /// Check if \p Instr belongs to any interleaved access group. 1326 bool isAccessInterleaved(Instruction *Instr) { 1327 return InterleaveInfo.isInterleaved(Instr); 1328 } 1329 1330 /// Get the interleaved access group that \p Instr belongs to. 1331 const InterleaveGroup<Instruction> * 1332 getInterleavedAccessGroup(Instruction *Instr) { 1333 return InterleaveInfo.getInterleaveGroup(Instr); 1334 } 1335 1336 /// Returns true if an interleaved group requires a scalar iteration 1337 /// to handle accesses with gaps, and there is nothing preventing us from 1338 /// creating a scalar epilogue. 1339 bool requiresScalarEpilogue() const { 1340 return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue(); 1341 } 1342 1343 /// Returns true if a scalar epilogue is not allowed due to optsize or a 1344 /// loop hint annotation. 1345 bool isScalarEpilogueAllowed() const { 1346 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1347 } 1348 1349 /// Returns true if all loop blocks should be masked to fold tail loop. 
1350 bool foldTailByMasking() const { return FoldTailByMasking; } 1351 1352 bool blockNeedsPredication(BasicBlock *BB) { 1353 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1354 } 1355 1356 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1357 /// nodes to the chain of instructions representing the reductions. Uses a 1358 /// MapVector to ensure deterministic iteration order. 1359 using ReductionChainMap = 1360 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1361 1362 /// Return the chain of instructions representing an inloop reduction. 1363 const ReductionChainMap &getInLoopReductionChains() const { 1364 return InLoopReductionChains; 1365 } 1366 1367 /// Returns true if the Phi is part of an inloop reduction. 1368 bool isInLoopReduction(PHINode *Phi) const { 1369 return InLoopReductionChains.count(Phi); 1370 } 1371 1372 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1373 /// with factor VF. Return the cost of the instruction, including 1374 /// scalarization overhead if it's needed. 1375 unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF); 1376 1377 /// Estimate cost of a call instruction CI if it were vectorized with factor 1378 /// VF. Return the cost of the instruction, including scalarization overhead 1379 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1380 /// scalarized - 1381 /// i.e. either vector version isn't available, or is too expensive. 1382 unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize); 1383 1384 /// Invalidates decisions already taken by the cost model. 1385 void invalidateCostModelingDecisions() { 1386 WideningDecisions.clear(); 1387 Uniforms.clear(); 1388 Scalars.clear(); 1389 } 1390 1391 private: 1392 unsigned NumPredStores = 0; 1393 1394 /// \return An upper bound for the vectorization factor, a power-of-2 larger 1395 /// than zero. One is returned if vectorization should best be avoided due 1396 /// to cost. 1397 unsigned computeFeasibleMaxVF(unsigned ConstTripCount); 1398 1399 /// The vectorization cost is a combination of the cost itself and a boolean 1400 /// indicating whether any of the contributing operations will actually 1401 /// operate on 1402 /// vector values after type legalization in the backend. If this latter value 1403 /// is 1404 /// false, then all operations will be scalarized (i.e. no vectorization has 1405 /// actually taken place). 1406 using VectorizationCostTy = std::pair<unsigned, bool>; 1407 1408 /// Returns the expected execution cost. The unit of the cost does 1409 /// not matter because we use the 'cost' units to compare different 1410 /// vector widths. The cost that is returned is *not* normalized by 1411 /// the factor width. 1412 VectorizationCostTy expectedCost(unsigned VF); 1413 1414 /// Returns the execution time cost of an instruction for a given vector 1415 /// width. Vector width of one means scalar. 1416 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1417 1418 /// The cost-computation logic from getInstructionCost which provides 1419 /// the vector type as an output parameter. 1420 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1421 1422 /// Calculate vectorization cost of memory instruction \p I. 1423 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF); 1424 1425 /// The cost computation for scalarized memory instruction. 
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load/Store instruction \p I with uniform pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element).
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Estimate the overhead of scalarizing an instruction. This is a
  /// convenience wrapper for the type-based getScalarizationOverhead API.
  unsigned getScalarizationOverhead(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;

  /// All blocks of the loop are to be masked to fold the tail of the scalar
  /// iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
1494 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1495 1496 /// Holds the instructions (address computations) that are forced to be 1497 /// scalarized. 1498 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1499 1500 /// PHINodes of the reductions that should be expanded in-loop along with 1501 /// their associated chains of reduction operations, in program order from top 1502 /// (PHI) to bottom 1503 ReductionChainMap InLoopReductionChains; 1504 1505 /// Returns the expected difference in cost from scalarizing the expression 1506 /// feeding a predicated instruction \p PredInst. The instructions to 1507 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1508 /// non-negative return value implies the expression will be scalarized. 1509 /// Currently, only single-use chains are considered for scalarization. 1510 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1511 unsigned VF); 1512 1513 /// Collect the instructions that are uniform after vectorization. An 1514 /// instruction is uniform if we represent it with a single scalar value in 1515 /// the vectorized loop corresponding to each vector iteration. Examples of 1516 /// uniform instructions include pointer operands of consecutive or 1517 /// interleaved memory accesses. Note that although uniformity implies an 1518 /// instruction will be scalar, the reverse is not true. In general, a 1519 /// scalarized instruction will be represented by VF scalar values in the 1520 /// vectorized loop, each corresponding to an iteration of the original 1521 /// scalar loop. 1522 void collectLoopUniforms(unsigned VF); 1523 1524 /// Collect the instructions that are scalar after vectorization. An 1525 /// instruction is scalar if it is known to be uniform or will be scalarized 1526 /// during vectorization. Non-uniform scalarized instructions will be 1527 /// represented by VF values in the vectorized loop, each corresponding to an 1528 /// iteration of the original scalar loop. 1529 void collectLoopScalars(unsigned VF); 1530 1531 /// Keeps cost model vectorization decision and cost for instructions. 1532 /// Right now it is used for memory instructions only. 1533 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1534 std::pair<InstWidening, unsigned>>; 1535 1536 DecisionList WideningDecisions; 1537 1538 /// Returns true if \p V is expected to be vectorized and it needs to be 1539 /// extracted. 1540 bool needsExtract(Value *V, unsigned VF) const { 1541 Instruction *I = dyn_cast<Instruction>(V); 1542 if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I)) 1543 return false; 1544 1545 // Assume we can vectorize V (and hence we need extraction) if the 1546 // scalars are not computed yet. This can happen, because it is called 1547 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1548 // the scalars are collected. That should be a safe assumption in most 1549 // cases, because we check if the operands have vectorizable types 1550 // beforehand in LoopVectorizationLegality. 1551 return Scalars.find(VF) == Scalars.end() || 1552 !isScalarAfterVectorization(I, VF); 1553 }; 1554 1555 /// Returns a range containing only operands needing to be extracted. 
1556 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1557 unsigned VF) { 1558 return SmallVector<Value *, 4>(make_filter_range( 1559 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1560 } 1561 1562 public: 1563 /// The loop that we evaluate. 1564 Loop *TheLoop; 1565 1566 /// Predicated scalar evolution analysis. 1567 PredicatedScalarEvolution &PSE; 1568 1569 /// Loop Info analysis. 1570 LoopInfo *LI; 1571 1572 /// Vectorization legality. 1573 LoopVectorizationLegality *Legal; 1574 1575 /// Vector target information. 1576 const TargetTransformInfo &TTI; 1577 1578 /// Target Library Info. 1579 const TargetLibraryInfo *TLI; 1580 1581 /// Demanded bits analysis. 1582 DemandedBits *DB; 1583 1584 /// Assumption cache. 1585 AssumptionCache *AC; 1586 1587 /// Interface to emit optimization remarks. 1588 OptimizationRemarkEmitter *ORE; 1589 1590 const Function *TheFunction; 1591 1592 /// Loop Vectorize Hint. 1593 const LoopVectorizeHints *Hints; 1594 1595 /// The interleave access information contains groups of interleaved accesses 1596 /// with the same stride and close to each other. 1597 InterleavedAccessInfo &InterleaveInfo; 1598 1599 /// Values to ignore in the cost model. 1600 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1601 1602 /// Values to ignore in the cost model when VF > 1. 1603 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1604 }; 1605 1606 } // end namespace llvm 1607 1608 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1609 // vectorization. The loop needs to be annotated with #pragma omp simd 1610 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1611 // vector length information is not provided, vectorization is not considered 1612 // explicit. Interleave hints are not allowed either. These limitations will be 1613 // relaxed in the future. 1614 // Please, note that we are currently forced to abuse the pragma 'clang 1615 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1616 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1617 // provides *explicit vectorization hints* (LV can bypass legal checks and 1618 // assume that vectorization is legal). However, both hints are implemented 1619 // using the same metadata (llvm.loop.vectorize, processed by 1620 // LoopVectorizeHints). This will be fixed in the future when the native IR 1621 // representation for pragma 'omp simd' is introduced. 1622 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1623 OptimizationRemarkEmitter *ORE) { 1624 assert(!OuterLp->empty() && "This is not an outer loop"); 1625 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1626 1627 // Only outer loops with an explicit vectorization hint are supported. 1628 // Unannotated outer loops are ignored. 1629 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1630 return false; 1631 1632 Function *Fn = OuterLp->getHeader()->getParent(); 1633 if (!Hints.allowVectorization(Fn, OuterLp, 1634 true /*VectorizeOnlyWhenForced*/)) { 1635 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1636 return false; 1637 } 1638 1639 if (Hints.getInterleave() > 1) { 1640 // TODO: Interleave support is future work. 
1641 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1642 "outer loops.\n"); 1643 Hints.emitRemarkWithHints(); 1644 return false; 1645 } 1646 1647 return true; 1648 } 1649 1650 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1651 OptimizationRemarkEmitter *ORE, 1652 SmallVectorImpl<Loop *> &V) { 1653 // Collect inner loops and outer loops without irreducible control flow. For 1654 // now, only collect outer loops that have explicit vectorization hints. If we 1655 // are stress testing the VPlan H-CFG construction, we collect the outermost 1656 // loop of every loop nest. 1657 if (L.empty() || VPlanBuildStressTest || 1658 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1659 LoopBlocksRPO RPOT(&L); 1660 RPOT.perform(LI); 1661 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1662 V.push_back(&L); 1663 // TODO: Collect inner loops inside marked outer loops in case 1664 // vectorization fails for the outer loop. Do not invoke 1665 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1666 // already known to be reducible. We can use an inherited attribute for 1667 // that. 1668 return; 1669 } 1670 } 1671 for (Loop *InnerL : L) 1672 collectSupportedLoops(*InnerL, LI, ORE, V); 1673 } 1674 1675 namespace { 1676 1677 /// The LoopVectorize Pass. 1678 struct LoopVectorize : public FunctionPass { 1679 /// Pass identification, replacement for typeid 1680 static char ID; 1681 1682 LoopVectorizePass Impl; 1683 1684 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1685 bool VectorizeOnlyWhenForced = false) 1686 : FunctionPass(ID), 1687 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1688 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1689 } 1690 1691 bool runOnFunction(Function &F) override { 1692 if (skipFunction(F)) 1693 return false; 1694 1695 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1696 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1697 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1698 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1699 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1700 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1701 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr; 1702 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1703 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1704 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1705 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1706 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1707 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1708 1709 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1710 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1711 1712 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1713 GetLAA, *ORE, PSI).MadeAnyChange; 1714 } 1715 1716 void getAnalysisUsage(AnalysisUsage &AU) const override { 1717 AU.addRequired<AssumptionCacheTracker>(); 1718 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1719 AU.addRequired<DominatorTreeWrapperPass>(); 1720 AU.addRequired<LoopInfoWrapperPass>(); 1721 AU.addRequired<ScalarEvolutionWrapperPass>(); 1722 AU.addRequired<TargetTransformInfoWrapperPass>(); 1723 AU.addRequired<AAResultsWrapperPass>(); 1724 AU.addRequired<LoopAccessLegacyAnalysis>(); 1725 AU.addRequired<DemandedBitsWrapperPass>(); 1726 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1727 AU.addRequired<InjectTLIMappingsLegacy>(); 1728 1729 // We currently do not preserve loopinfo/dominator analyses with outer loop 1730 // vectorization. Until this is addressed, mark these analyses as preserved 1731 // only for non-VPlan-native path. 1732 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 1733 if (!EnableVPlanNativePath) { 1734 AU.addPreserved<LoopInfoWrapperPass>(); 1735 AU.addPreserved<DominatorTreeWrapperPass>(); 1736 } 1737 1738 AU.addPreserved<BasicAAWrapperPass>(); 1739 AU.addPreserved<GlobalsAAWrapperPass>(); 1740 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1741 } 1742 }; 1743 1744 } // end anonymous namespace 1745 1746 //===----------------------------------------------------------------------===// 1747 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1748 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1749 //===----------------------------------------------------------------------===// 1750 1751 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1752 // We need to place the broadcast of invariant variables outside the loop, 1753 // but only if it's proven safe to do so. Else, broadcast will be inside 1754 // vector loop body. 1755 Instruction *Instr = dyn_cast<Instruction>(V); 1756 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1757 (!Instr || 1758 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1759 // Place the code for broadcasting invariant variables in the new preheader. 1760 IRBuilder<>::InsertPointGuard Guard(Builder); 1761 if (SafeToHoist) 1762 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1763 1764 // Broadcast the scalar into all locations in the vector. 
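  // As an illustrative sketch (assuming VF = 4 and an i32 scalar %v, neither of
  // which is taken from the surrounding code), the splat created below
  // conceptually expands to:
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                                    <4 x i32> undef, <4 x i32> zeroinitializer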
1765 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1766 1767 return Shuf; 1768 } 1769 1770 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1771 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1772 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1773 "Expected either an induction phi-node or a truncate of it!"); 1774 Value *Start = II.getStartValue(); 1775 1776 // Construct the initial value of the vector IV in the vector loop preheader 1777 auto CurrIP = Builder.saveIP(); 1778 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1779 if (isa<TruncInst>(EntryVal)) { 1780 assert(Start->getType()->isIntegerTy() && 1781 "Truncation requires an integer type"); 1782 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1783 Step = Builder.CreateTrunc(Step, TruncType); 1784 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1785 } 1786 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1787 Value *SteppedStart = 1788 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1789 1790 // We create vector phi nodes for both integer and floating-point induction 1791 // variables. Here, we determine the kind of arithmetic we will perform. 1792 Instruction::BinaryOps AddOp; 1793 Instruction::BinaryOps MulOp; 1794 if (Step->getType()->isIntegerTy()) { 1795 AddOp = Instruction::Add; 1796 MulOp = Instruction::Mul; 1797 } else { 1798 AddOp = II.getInductionOpcode(); 1799 MulOp = Instruction::FMul; 1800 } 1801 1802 // Multiply the vectorization factor by the step using integer or 1803 // floating-point arithmetic as appropriate. 1804 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1805 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1806 1807 // Create a vector splat to use in the induction update. 1808 // 1809 // FIXME: If the step is non-constant, we create the vector splat with 1810 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1811 // handle a constant vector splat. 1812 Value *SplatVF = isa<Constant>(Mul) 1813 ? ConstantVector::getSplat(ElementCount::getFixed(VF), 1814 cast<Constant>(Mul)) 1815 : Builder.CreateVectorSplat(VF, Mul); 1816 Builder.restoreIP(CurrIP); 1817 1818 // We may need to add the step a number of times, depending on the unroll 1819 // factor. The last of those goes into the PHI. 1820 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1821 &*LoopVectorBody->getFirstInsertionPt()); 1822 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1823 Instruction *LastInduction = VecInd; 1824 for (unsigned Part = 0; Part < UF; ++Part) { 1825 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1826 1827 if (isa<TruncInst>(EntryVal)) 1828 addMetadata(LastInduction, EntryVal); 1829 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1830 1831 LastInduction = cast<Instruction>(addFastMathFlag( 1832 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1833 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1834 } 1835 1836 // Move the last step to the end of the latch block. This ensures consistent 1837 // placement of all induction updates. 
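  // For instance (an illustrative sketch assuming VF = 4, UF = 2, a start value
  // of 0 and a step of 1; none of these values come from the code above), the
  // vector phi starts at <0, 1, 2, 3>, the first step.add yields <4, 5, 6, 7>
  // for the second unroll part, and the final step.add, <8, 9, 10, 11>, is the
  // value moved into the latch below and fed back into the phi.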
1838 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1839 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1840 auto *ICmp = cast<Instruction>(Br->getCondition()); 1841 LastInduction->moveBefore(ICmp); 1842 LastInduction->setName("vec.ind.next"); 1843 1844 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1845 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1846 } 1847 1848 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1849 return Cost->isScalarAfterVectorization(I, VF) || 1850 Cost->isProfitableToScalarize(I, VF); 1851 } 1852 1853 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1854 if (shouldScalarizeInstruction(IV)) 1855 return true; 1856 auto isScalarInst = [&](User *U) -> bool { 1857 auto *I = cast<Instruction>(U); 1858 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1859 }; 1860 return llvm::any_of(IV->users(), isScalarInst); 1861 } 1862 1863 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1864 const InductionDescriptor &ID, const Instruction *EntryVal, 1865 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1866 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1867 "Expected either an induction phi-node or a truncate of it!"); 1868 1869 // This induction variable is not the phi from the original loop but the 1870 // newly-created IV based on the proof that casted Phi is equal to the 1871 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1872 // re-uses the same InductionDescriptor that original IV uses but we don't 1873 // have to do any recording in this case - that is done when original IV is 1874 // processed. 1875 if (isa<TruncInst>(EntryVal)) 1876 return; 1877 1878 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1879 if (Casts.empty()) 1880 return; 1881 // Only the first Cast instruction in the Casts vector is of interest. 1882 // The rest of the Casts (if exist) have no uses outside the 1883 // induction update chain itself. 1884 Instruction *CastInst = *Casts.begin(); 1885 if (Lane < UINT_MAX) 1886 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1887 else 1888 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1889 } 1890 1891 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1892 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1893 "Primary induction variable must have an integer type"); 1894 1895 auto II = Legal->getInductionVars().find(IV); 1896 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 1897 1898 auto ID = II->second; 1899 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1900 1901 // The value from the original loop to which we are mapping the new induction 1902 // variable. 1903 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1904 1905 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1906 1907 // Generate code for the induction step. 
Note that induction steps are 1908 // required to be loop-invariant 1909 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 1910 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 1911 "Induction step should be loop invariant"); 1912 if (PSE.getSE()->isSCEVable(IV->getType())) { 1913 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1914 return Exp.expandCodeFor(Step, Step->getType(), 1915 LoopVectorPreHeader->getTerminator()); 1916 } 1917 return cast<SCEVUnknown>(Step)->getValue(); 1918 }; 1919 1920 // The scalar value to broadcast. This is derived from the canonical 1921 // induction variable. If a truncation type is given, truncate the canonical 1922 // induction variable and step. Otherwise, derive these values from the 1923 // induction descriptor. 1924 auto CreateScalarIV = [&](Value *&Step) -> Value * { 1925 Value *ScalarIV = Induction; 1926 if (IV != OldInduction) { 1927 ScalarIV = IV->getType()->isIntegerTy() 1928 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1929 : Builder.CreateCast(Instruction::SIToFP, Induction, 1930 IV->getType()); 1931 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1932 ScalarIV->setName("offset.idx"); 1933 } 1934 if (Trunc) { 1935 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1936 assert(Step->getType()->isIntegerTy() && 1937 "Truncation requires an integer step"); 1938 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1939 Step = Builder.CreateTrunc(Step, TruncType); 1940 } 1941 return ScalarIV; 1942 }; 1943 1944 // Create the vector values from the scalar IV, in the absence of creating a 1945 // vector IV. 1946 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 1947 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1948 for (unsigned Part = 0; Part < UF; ++Part) { 1949 Value *EntryPart = 1950 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1951 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1952 if (Trunc) 1953 addMetadata(EntryPart, Trunc); 1954 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1955 } 1956 }; 1957 1958 // Now do the actual transformations, and start with creating the step value. 1959 Value *Step = CreateStepValue(ID.getStep()); 1960 if (VF <= 1) { 1961 Value *ScalarIV = CreateScalarIV(Step); 1962 CreateSplatIV(ScalarIV, Step); 1963 return; 1964 } 1965 1966 // Determine if we want a scalar version of the induction variable. This is 1967 // true if the induction variable itself is not widened, or if it has at 1968 // least one user in the loop that is not widened. 1969 auto NeedsScalarIV = needsScalarInduction(EntryVal); 1970 if (!NeedsScalarIV) { 1971 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1972 return; 1973 } 1974 1975 // Try to create a new independent vector induction variable. If we can't 1976 // create the phi node, we will splat the scalar induction variable in each 1977 // loop iteration. 1978 if (!shouldScalarizeInstruction(EntryVal)) { 1979 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1980 Value *ScalarIV = CreateScalarIV(Step); 1981 // Create scalar steps that can be used by instructions we will later 1982 // scalarize. Note that the addition of the scalar steps will not increase 1983 // the number of instructions in the loop in the common case prior to 1984 // InstCombine. We will be trading one vector extract for each scalar step. 
1985 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1986 return; 1987 } 1988 1989 // All IV users are scalar instructions, so only emit a scalar IV, not a 1990 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 1991 // predicate used by the masked loads/stores. 1992 Value *ScalarIV = CreateScalarIV(Step); 1993 if (!Cost->isScalarEpilogueAllowed()) 1994 CreateSplatIV(ScalarIV, Step); 1995 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1996 } 1997 1998 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1999 Instruction::BinaryOps BinOp) { 2000 // Create and check the types. 2001 auto *ValVTy = cast<VectorType>(Val->getType()); 2002 int VLen = ValVTy->getNumElements(); 2003 2004 Type *STy = Val->getType()->getScalarType(); 2005 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2006 "Induction Step must be an integer or FP"); 2007 assert(Step->getType() == STy && "Step has wrong type"); 2008 2009 SmallVector<Constant *, 8> Indices; 2010 2011 if (STy->isIntegerTy()) { 2012 // Create a vector of consecutive numbers from zero to VF. 2013 for (int i = 0; i < VLen; ++i) 2014 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2015 2016 // Add the consecutive indices to the vector value. 2017 Constant *Cv = ConstantVector::get(Indices); 2018 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2019 Step = Builder.CreateVectorSplat(VLen, Step); 2020 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2021 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2022 // which can be found from the original scalar operations. 2023 Step = Builder.CreateMul(Cv, Step); 2024 return Builder.CreateAdd(Val, Step, "induction"); 2025 } 2026 2027 // Floating point induction. 2028 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2029 "Binary Opcode should be specified for FP induction"); 2030 // Create a vector of consecutive numbers from zero to VF. 2031 for (int i = 0; i < VLen; ++i) 2032 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2033 2034 // Add the consecutive indices to the vector value. 2035 Constant *Cv = ConstantVector::get(Indices); 2036 2037 Step = Builder.CreateVectorSplat(VLen, Step); 2038 2039 // Floating point operations had to be 'fast' to enable the induction. 2040 FastMathFlags Flags; 2041 Flags.setFast(); 2042 2043 Value *MulOp = Builder.CreateFMul(Cv, Step); 2044 if (isa<Instruction>(MulOp)) 2045 // Have to check, MulOp may be a constant 2046 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2047 2048 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2049 if (isa<Instruction>(BOp)) 2050 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2051 return BOp; 2052 } 2053 2054 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2055 Instruction *EntryVal, 2056 const InductionDescriptor &ID) { 2057 // We shouldn't have to build scalar steps if we aren't vectorizing. 2058 assert(VF > 1 && "VF should be greater than one"); 2059 2060 // Get the value type and ensure it and the step have the same integer type. 2061 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2062 assert(ScalarIVTy == Step->getType() && 2063 "Val and Step should have the same type"); 2064 2065 // We build scalar steps for both integer and floating-point induction 2066 // variables. Here, we determine the kind of arithmetic we will perform. 
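  // As a rough illustration (assuming an integer IV with VF = 4, UF = 2, a step
  // of 1, and an EntryVal that is not uniform after vectorization; these values
  // are not taken from this function), lane Lane of unroll part Part receives
  // ScalarIV + (VF * Part + Lane) * Step, i.e. the scalars
  // ScalarIV + 0, ScalarIV + 1, ..., ScalarIV + 7.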
2067 Instruction::BinaryOps AddOp; 2068 Instruction::BinaryOps MulOp; 2069 if (ScalarIVTy->isIntegerTy()) { 2070 AddOp = Instruction::Add; 2071 MulOp = Instruction::Mul; 2072 } else { 2073 AddOp = ID.getInductionOpcode(); 2074 MulOp = Instruction::FMul; 2075 } 2076 2077 // Determine the number of scalars we need to generate for each unroll 2078 // iteration. If EntryVal is uniform, we only need to generate the first 2079 // lane. Otherwise, we generate all VF values. 2080 unsigned Lanes = 2081 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 2082 : VF; 2083 // Compute the scalar steps and save the results in VectorLoopValueMap. 2084 for (unsigned Part = 0; Part < UF; ++Part) { 2085 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2086 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2087 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2088 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2089 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2090 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2091 } 2092 } 2093 } 2094 2095 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2096 assert(V != Induction && "The new induction variable should not be used."); 2097 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2098 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2099 2100 // If we have a stride that is replaced by one, do it here. Defer this for 2101 // the VPlan-native path until we start running Legal checks in that path. 2102 if (!EnableVPlanNativePath && Legal->hasStride(V)) 2103 V = ConstantInt::get(V->getType(), 1); 2104 2105 // If we have a vector mapped to this value, return it. 2106 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2107 return VectorLoopValueMap.getVectorValue(V, Part); 2108 2109 // If the value has not been vectorized, check if it has been scalarized 2110 // instead. If it has been scalarized, and we actually need the value in 2111 // vector form, we will construct the vector values on demand. 2112 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2113 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2114 2115 // If we've scalarized a value, that value should be an instruction. 2116 auto *I = cast<Instruction>(V); 2117 2118 // If we aren't vectorizing, we can just copy the scalar map values over to 2119 // the vector map. 2120 if (VF == 1) { 2121 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2122 return ScalarValue; 2123 } 2124 2125 // Get the last scalar instruction we generated for V and Part. If the value 2126 // is known to be uniform after vectorization, this corresponds to lane zero 2127 // of the Part unroll iteration. Otherwise, the last instruction is the one 2128 // we created for the last vector lane of the Part unroll iteration. 2129 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2130 auto *LastInst = cast<Instruction>( 2131 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2132 2133 // Set the insert point after the last scalarized instruction. This ensures 2134 // the insertelement sequence will directly follow the scalar definitions. 2135 auto OldIP = Builder.saveIP(); 2136 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2137 Builder.SetInsertPoint(&*NewIP); 2138 2139 // However, if we are vectorizing, we need to construct the vector values. 
2140 // If the value is known to be uniform after vectorization, we can just 2141 // broadcast the scalar value corresponding to lane zero for each unroll 2142 // iteration. Otherwise, we construct the vector values using insertelement 2143 // instructions. Since the resulting vectors are stored in 2144 // VectorLoopValueMap, we will only generate the insertelements once. 2145 Value *VectorValue = nullptr; 2146 if (Cost->isUniformAfterVectorization(I, VF)) { 2147 VectorValue = getBroadcastInstrs(ScalarValue); 2148 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2149 } else { 2150 // Initialize packing with insertelements to start from undef. 2151 Value *Undef = UndefValue::get(FixedVectorType::get(V->getType(), VF)); 2152 VectorLoopValueMap.setVectorValue(V, Part, Undef); 2153 for (unsigned Lane = 0; Lane < VF; ++Lane) 2154 packScalarIntoVectorValue(V, {Part, Lane}); 2155 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2156 } 2157 Builder.restoreIP(OldIP); 2158 return VectorValue; 2159 } 2160 2161 // If this scalar is unknown, assume that it is a constant or that it is 2162 // loop invariant. Broadcast V and save the value for future uses. 2163 Value *B = getBroadcastInstrs(V); 2164 VectorLoopValueMap.setVectorValue(V, Part, B); 2165 return B; 2166 } 2167 2168 Value * 2169 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2170 const VPIteration &Instance) { 2171 // If the value is not an instruction contained in the loop, it should 2172 // already be scalar. 2173 if (OrigLoop->isLoopInvariant(V)) 2174 return V; 2175 2176 assert(Instance.Lane > 0 2177 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2178 : true && "Uniform values only have lane zero"); 2179 2180 // If the value from the original loop has not been vectorized, it is 2181 // represented by UF x VF scalar values in the new loop. Return the requested 2182 // scalar value. 2183 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2184 return VectorLoopValueMap.getScalarValue(V, Instance); 2185 2186 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2187 // for the given unroll part. If this entry is not a vector type (i.e., the 2188 // vectorization factor is one), there is no need to generate an 2189 // extractelement instruction. 2190 auto *U = getOrCreateVectorValue(V, Instance.Part); 2191 if (!U->getType()->isVectorTy()) { 2192 assert(VF == 1 && "Value not scalarized has non-vector type"); 2193 return U; 2194 } 2195 2196 // Otherwise, the value from the original loop has been vectorized and is 2197 // represented by UF vector values. Extract and return the requested scalar 2198 // value from the appropriate vector lane. 
2199 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2200 } 2201 2202 void InnerLoopVectorizer::packScalarIntoVectorValue( 2203 Value *V, const VPIteration &Instance) { 2204 assert(V != Induction && "The new induction variable should not be used."); 2205 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2206 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2207 2208 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2209 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2210 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2211 Builder.getInt32(Instance.Lane)); 2212 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2213 } 2214 2215 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2216 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2217 SmallVector<int, 8> ShuffleMask; 2218 for (unsigned i = 0; i < VF; ++i) 2219 ShuffleMask.push_back(VF - i - 1); 2220 2221 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2222 ShuffleMask, "reverse"); 2223 } 2224 2225 // Return whether we allow using masked interleave-groups (for dealing with 2226 // strided loads/stores that reside in predicated blocks, or for dealing 2227 // with gaps). 2228 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2229 // If an override option has been passed in for interleaved accesses, use it. 2230 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2231 return EnableMaskedInterleavedMemAccesses; 2232 2233 return TTI.enableMaskedInterleavedAccessVectorization(); 2234 } 2235 2236 // Try to vectorize the interleave group that \p Instr belongs to. 2237 // 2238 // E.g. Translate following interleaved load group (factor = 3): 2239 // for (i = 0; i < N; i+=3) { 2240 // R = Pic[i]; // Member of index 0 2241 // G = Pic[i+1]; // Member of index 1 2242 // B = Pic[i+2]; // Member of index 2 2243 // ... // do something to R, G, B 2244 // } 2245 // To: 2246 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2247 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2248 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2249 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2250 // 2251 // Or translate following interleaved store group (factor = 3): 2252 // for (i = 0; i < N; i+=3) { 2253 // ... do something to R, G, B 2254 // Pic[i] = R; // Member of index 0 2255 // Pic[i+1] = G; // Member of index 1 2256 // Pic[i+2] = B; // Member of index 2 2257 // } 2258 // To: 2259 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2260 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2261 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2262 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2263 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2264 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2265 const InterleaveGroup<Instruction> *Group, VPTransformState &State, 2266 VPValue *Addr, VPValue *BlockInMask) { 2267 Instruction *Instr = Group->getInsertPos(); 2268 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2269 2270 // Prepare for the vector type of the interleaved load/store. 2271 Type *ScalarTy = getMemInstValueType(Instr); 2272 unsigned InterleaveFactor = Group->getFactor(); 2273 auto *VecTy = FixedVectorType::get(ScalarTy, InterleaveFactor * VF); 2274 2275 // Prepare for the new pointers. 
2276 SmallVector<Value *, 2> AddrParts; 2277 unsigned Index = Group->getIndex(Instr); 2278 2279 // TODO: extend the masked interleaved-group support to reversed access. 2280 assert((!BlockInMask || !Group->isReverse()) && 2281 "Reversed masked interleave-group not supported."); 2282 2283 // If the group is reverse, adjust the index to refer to the last vector lane 2284 // instead of the first. We adjust the index from the first vector lane, 2285 // rather than directly getting the pointer for lane VF - 1, because the 2286 // pointer operand of the interleaved access is supposed to be uniform. For 2287 // uniform instructions, we're only required to generate a value for the 2288 // first vector lane in each unroll iteration. 2289 if (Group->isReverse()) 2290 Index += (VF - 1) * Group->getFactor(); 2291 2292 for (unsigned Part = 0; Part < UF; Part++) { 2293 Value *AddrPart = State.get(Addr, {Part, 0}); 2294 setDebugLocFromInst(Builder, AddrPart); 2295 2296 // Notice current instruction could be any index. Need to adjust the address 2297 // to the member of index 0. 2298 // 2299 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2300 // b = A[i]; // Member of index 0 2301 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2302 // 2303 // E.g. A[i+1] = a; // Member of index 1 2304 // A[i] = b; // Member of index 0 2305 // A[i+2] = c; // Member of index 2 (Current instruction) 2306 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2307 2308 bool InBounds = false; 2309 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2310 InBounds = gep->isInBounds(); 2311 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2312 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2313 2314 // Cast to the vector pointer type. 2315 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2316 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2317 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2318 } 2319 2320 setDebugLocFromInst(Builder, Instr); 2321 Value *UndefVec = UndefValue::get(VecTy); 2322 2323 Value *MaskForGaps = nullptr; 2324 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2325 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2326 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2327 } 2328 2329 // Vectorize the interleaved load group. 2330 if (isa<LoadInst>(Instr)) { 2331 // For each unroll part, create a wide load for the group. 2332 SmallVector<Value *, 2> NewLoads; 2333 for (unsigned Part = 0; Part < UF; Part++) { 2334 Instruction *NewLoad; 2335 if (BlockInMask || MaskForGaps) { 2336 assert(useMaskedInterleavedAccesses(*TTI) && 2337 "masked interleaved groups are not allowed."); 2338 Value *GroupMask = MaskForGaps; 2339 if (BlockInMask) { 2340 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2341 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2342 Value *ShuffledMask = Builder.CreateShuffleVector( 2343 BlockInMaskPart, Undefs, 2344 createReplicatedMask(InterleaveFactor, VF), "interleaved.mask"); 2345 GroupMask = MaskForGaps 2346 ? 
Builder.CreateBinOp(Instruction::And, ShuffledMask, 2347 MaskForGaps) 2348 : ShuffledMask; 2349 } 2350 NewLoad = 2351 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2352 GroupMask, UndefVec, "wide.masked.vec"); 2353 } 2354 else 2355 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2356 Group->getAlign(), "wide.vec"); 2357 Group->addMetadata(NewLoad); 2358 NewLoads.push_back(NewLoad); 2359 } 2360 2361 // For each member in the group, shuffle out the appropriate data from the 2362 // wide loads. 2363 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2364 Instruction *Member = Group->getMember(I); 2365 2366 // Skip the gaps in the group. 2367 if (!Member) 2368 continue; 2369 2370 auto StrideMask = createStrideMask(I, InterleaveFactor, VF); 2371 for (unsigned Part = 0; Part < UF; Part++) { 2372 Value *StridedVec = Builder.CreateShuffleVector( 2373 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2374 2375 // If this member has different type, cast the result type. 2376 if (Member->getType() != ScalarTy) { 2377 VectorType *OtherVTy = FixedVectorType::get(Member->getType(), VF); 2378 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2379 } 2380 2381 if (Group->isReverse()) 2382 StridedVec = reverseVector(StridedVec); 2383 2384 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2385 } 2386 } 2387 return; 2388 } 2389 2390 // The sub vector type for current instruction. 2391 auto *SubVT = FixedVectorType::get(ScalarTy, VF); 2392 2393 // Vectorize the interleaved store group. 2394 for (unsigned Part = 0; Part < UF; Part++) { 2395 // Collect the stored vector from each member. 2396 SmallVector<Value *, 4> StoredVecs; 2397 for (unsigned i = 0; i < InterleaveFactor; i++) { 2398 // Interleaved store group doesn't allow a gap, so each index has a member 2399 Instruction *Member = Group->getMember(i); 2400 assert(Member && "Fail to get a member from an interleaved store group"); 2401 2402 Value *StoredVec = getOrCreateVectorValue( 2403 cast<StoreInst>(Member)->getValueOperand(), Part); 2404 if (Group->isReverse()) 2405 StoredVec = reverseVector(StoredVec); 2406 2407 // If this member has different type, cast it to a unified type. 2408 2409 if (StoredVec->getType() != SubVT) 2410 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2411 2412 StoredVecs.push_back(StoredVec); 2413 } 2414 2415 // Concatenate all vectors into a wide vector. 2416 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2417 2418 // Interleave the elements in the wide vector. 2419 Value *IVec = Builder.CreateShuffleVector( 2420 WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor), 2421 "interleaved.vec"); 2422 2423 Instruction *NewStoreInstr; 2424 if (BlockInMask) { 2425 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2426 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2427 Value *ShuffledMask = Builder.CreateShuffleVector( 2428 BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF), 2429 "interleaved.mask"); 2430 NewStoreInstr = Builder.CreateMaskedStore( 2431 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2432 } 2433 else 2434 NewStoreInstr = 2435 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2436 2437 Group->addMetadata(NewStoreInstr); 2438 } 2439 } 2440 2441 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2442 VPTransformState &State, 2443 VPValue *Addr, 2444 VPValue *StoredValue, 2445 VPValue *BlockInMask) { 2446 // Attempt to issue a wide load. 
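  // Illustrative note (not tied to a particular target): a unit-stride access
  // such as A[i] is expected to become a single wide load/store per unroll part
  // (CM_Widen), a stride of -1 such as A[n - i] a wide access over the reversed
  // lanes (CM_Widen_Reverse), and an irregular access such as A[B[i]] a masked
  // gather/scatter (CM_GatherScatter), as decided by the cost model checked
  // below.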
2447 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2448 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2449 2450 assert((LI || SI) && "Invalid Load/Store instruction"); 2451 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2452 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2453 2454 LoopVectorizationCostModel::InstWidening Decision = 2455 Cost->getWideningDecision(Instr, VF); 2456 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2457 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2458 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2459 "CM decision is not to widen the memory instruction"); 2460 2461 Type *ScalarDataTy = getMemInstValueType(Instr); 2462 auto *DataTy = FixedVectorType::get(ScalarDataTy, VF); 2463 const Align Alignment = getLoadStoreAlignment(Instr); 2464 2465 // Determine if the pointer operand of the access is either consecutive or 2466 // reverse consecutive. 2467 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2468 bool ConsecutiveStride = 2469 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2470 bool CreateGatherScatter = 2471 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2472 2473 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2474 // gather/scatter. Otherwise Decision should have been to Scalarize. 2475 assert((ConsecutiveStride || CreateGatherScatter) && 2476 "The instruction should be scalarized"); 2477 (void)ConsecutiveStride; 2478 2479 VectorParts BlockInMaskParts(UF); 2480 bool isMaskRequired = BlockInMask; 2481 if (isMaskRequired) 2482 for (unsigned Part = 0; Part < UF; ++Part) 2483 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2484 2485 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2486 // Calculate the pointer for the specific unroll-part. 2487 GetElementPtrInst *PartPtr = nullptr; 2488 2489 bool InBounds = false; 2490 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2491 InBounds = gep->isInBounds(); 2492 2493 if (Reverse) { 2494 // If the address is consecutive but reversed, then the 2495 // wide store needs to start at the last vector element. 2496 PartPtr = cast<GetElementPtrInst>( 2497 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2498 PartPtr->setIsInBounds(InBounds); 2499 PartPtr = cast<GetElementPtrInst>( 2500 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2501 PartPtr->setIsInBounds(InBounds); 2502 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2503 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2504 } else { 2505 PartPtr = cast<GetElementPtrInst>( 2506 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2507 PartPtr->setIsInBounds(InBounds); 2508 } 2509 2510 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2511 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2512 }; 2513 2514 // Handle Stores: 2515 if (SI) { 2516 setDebugLocFromInst(Builder, SI); 2517 2518 for (unsigned Part = 0; Part < UF; ++Part) { 2519 Instruction *NewSI = nullptr; 2520 Value *StoredVal = State.get(StoredValue, Part); 2521 if (CreateGatherScatter) { 2522 Value *MaskPart = isMaskRequired ? 
BlockInMaskParts[Part] : nullptr; 2523 Value *VectorGep = State.get(Addr, Part); 2524 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2525 MaskPart); 2526 } else { 2527 if (Reverse) { 2528 // If we store to reverse consecutive memory locations, then we need 2529 // to reverse the order of elements in the stored value. 2530 StoredVal = reverseVector(StoredVal); 2531 // We don't want to update the value in the map as it might be used in 2532 // another expression. So don't call resetVectorValue(StoredVal). 2533 } 2534 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2535 if (isMaskRequired) 2536 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2537 BlockInMaskParts[Part]); 2538 else 2539 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2540 } 2541 addMetadata(NewSI, SI); 2542 } 2543 return; 2544 } 2545 2546 // Handle loads. 2547 assert(LI && "Must have a load instruction"); 2548 setDebugLocFromInst(Builder, LI); 2549 for (unsigned Part = 0; Part < UF; ++Part) { 2550 Value *NewLI; 2551 if (CreateGatherScatter) { 2552 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2553 Value *VectorGep = State.get(Addr, Part); 2554 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2555 nullptr, "wide.masked.gather"); 2556 addMetadata(NewLI, LI); 2557 } else { 2558 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2559 if (isMaskRequired) 2560 NewLI = Builder.CreateMaskedLoad( 2561 VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy), 2562 "wide.masked.load"); 2563 else 2564 NewLI = 2565 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2566 2567 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2568 addMetadata(NewLI, LI); 2569 if (Reverse) 2570 NewLI = reverseVector(NewLI); 2571 } 2572 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2573 } 2574 } 2575 2576 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPUser &User, 2577 const VPIteration &Instance, 2578 bool IfPredicateInstr, 2579 VPTransformState &State) { 2580 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2581 2582 setDebugLocFromInst(Builder, Instr); 2583 2584 // Does this instruction return a value ? 2585 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2586 2587 Instruction *Cloned = Instr->clone(); 2588 if (!IsVoidRetTy) 2589 Cloned->setName(Instr->getName() + ".cloned"); 2590 2591 // Replace the operands of the cloned instructions with their scalar 2592 // equivalents in the new loop. 2593 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { 2594 auto *NewOp = State.get(User.getOperand(op), Instance); 2595 Cloned->setOperand(op, NewOp); 2596 } 2597 addNewMetadata(Cloned, Instr); 2598 2599 // Place the cloned scalar in the new loop. 2600 Builder.Insert(Cloned); 2601 2602 // Add the cloned scalar to the scalar map entry. 2603 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2604 2605 // If we just cloned a new assumption, add it the assumption cache. 2606 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2607 if (II->getIntrinsicID() == Intrinsic::assume) 2608 AC->registerAssumption(II); 2609 2610 // End if-block. 
2611 if (IfPredicateInstr) 2612 PredicatedInstructions.push_back(Cloned); 2613 } 2614 2615 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2616 Value *End, Value *Step, 2617 Instruction *DL) { 2618 BasicBlock *Header = L->getHeader(); 2619 BasicBlock *Latch = L->getLoopLatch(); 2620 // As we're just creating this loop, it's possible no latch exists 2621 // yet. If so, use the header as this will be a single block loop. 2622 if (!Latch) 2623 Latch = Header; 2624 2625 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2626 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2627 setDebugLocFromInst(Builder, OldInst); 2628 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2629 2630 Builder.SetInsertPoint(Latch->getTerminator()); 2631 setDebugLocFromInst(Builder, OldInst); 2632 2633 // Create i+1 and fill the PHINode. 2634 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2635 Induction->addIncoming(Start, L->getLoopPreheader()); 2636 Induction->addIncoming(Next, Latch); 2637 // Create the compare. 2638 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2639 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2640 2641 // Now we have two terminators. Remove the old one from the block. 2642 Latch->getTerminator()->eraseFromParent(); 2643 2644 return Induction; 2645 } 2646 2647 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2648 if (TripCount) 2649 return TripCount; 2650 2651 assert(L && "Create Trip Count for null loop."); 2652 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2653 // Find the loop boundaries. 2654 ScalarEvolution *SE = PSE.getSE(); 2655 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2656 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2657 "Invalid loop count"); 2658 2659 Type *IdxTy = Legal->getWidestInductionType(); 2660 assert(IdxTy && "No type for induction"); 2661 2662 // The exit count might have the type of i64 while the phi is i32. This can 2663 // happen if we have an induction variable that is sign extended before the 2664 // compare. The only way that we get a backedge taken count is that the 2665 // induction variable was signed and as such will not overflow. In such a case 2666 // truncation is legal. 2667 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2668 IdxTy->getPrimitiveSizeInBits()) 2669 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2670 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2671 2672 // Get the total trip count from the count by adding 1. 2673 const SCEV *ExitCount = SE->getAddExpr( 2674 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2675 2676 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2677 2678 // Expand the trip count and place the new instructions in the preheader. 2679 // Notice that the pre-header does not change, only the loop body. 2680 SCEVExpander Exp(*SE, DL, "induction"); 2681 2682 // Count holds the overall loop count (N). 
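  // For example (an illustrative case, not derived from the code above), a
  // canonical loop "for (i = 0; i < n; ++i)" has a backedge-taken count of
  // n - 1, so the expression expanded below evaluates to n.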
2683 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2684 L->getLoopPreheader()->getTerminator()); 2685 2686 if (TripCount->getType()->isPointerTy()) 2687 TripCount = 2688 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2689 L->getLoopPreheader()->getTerminator()); 2690 2691 return TripCount; 2692 } 2693 2694 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2695 if (VectorTripCount) 2696 return VectorTripCount; 2697 2698 Value *TC = getOrCreateTripCount(L); 2699 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2700 2701 Type *Ty = TC->getType(); 2702 Constant *Step = ConstantInt::get(Ty, VF * UF); 2703 2704 // If the tail is to be folded by masking, round the number of iterations N 2705 // up to a multiple of Step instead of rounding down. This is done by first 2706 // adding Step-1 and then rounding down. Note that it's ok if this addition 2707 // overflows: the vector induction variable will eventually wrap to zero given 2708 // that it starts at zero and its Step is a power of two; the loop will then 2709 // exit, with the last early-exit vector comparison also producing all-true. 2710 if (Cost->foldTailByMasking()) { 2711 assert(isPowerOf2_32(VF * UF) && 2712 "VF*UF must be a power of 2 when folding tail by masking"); 2713 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2714 } 2715 2716 // Now we need to generate the expression for the part of the loop that the 2717 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2718 // iterations are not required for correctness, or N - Step, otherwise. Step 2719 // is equal to the vectorization factor (number of SIMD elements) times the 2720 // unroll factor (number of SIMD instructions). 2721 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2722 2723 // If there is a non-reversed interleaved group that may speculatively access 2724 // memory out-of-bounds, we need to ensure that there will be at least one 2725 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2726 // the trip count, we set the remainder to be equal to the step. If the step 2727 // does not evenly divide the trip count, no adjustment is necessary since 2728 // there will already be scalar iterations. Note that the minimum iterations 2729 // check ensures that N >= Step. 2730 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2731 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2732 R = Builder.CreateSelect(IsZero, Step, R); 2733 } 2734 2735 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2736 2737 return VectorTripCount; 2738 } 2739 2740 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2741 const DataLayout &DL) { 2742 // Verify that V is a vector type with same number of elements as DstVTy. 2743 unsigned VF = DstVTy->getNumElements(); 2744 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2745 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2746 Type *SrcElemTy = SrcVecTy->getElementType(); 2747 Type *DstElemTy = DstVTy->getElementType(); 2748 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2749 "Vector elements must have same size"); 2750 2751 // Do a direct cast if element types are castable. 2752 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2753 return Builder.CreateBitOrPointerCast(V, DstVTy); 2754 } 2755 // V cannot be directly casted to desired vector type. 
2756 // May happen when V is a floating point vector but DstVTy is a vector of 2757 // pointers or vice-versa. Handle this using a two-step bitcast using an 2758 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2759 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2760 "Only one type should be a pointer type"); 2761 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2762 "Only one type should be a floating point type"); 2763 Type *IntTy = 2764 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2765 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2766 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2767 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2768 } 2769 2770 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2771 BasicBlock *Bypass) { 2772 Value *Count = getOrCreateTripCount(L); 2773 // Reuse existing vector loop preheader for TC checks. 2774 // Note that new preheader block is generated for vector loop. 2775 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2776 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2777 2778 // Generate code to check if the loop's trip count is less than VF * UF, or 2779 // equal to it in case a scalar epilogue is required; this implies that the 2780 // vector trip count is zero. This check also covers the case where adding one 2781 // to the backedge-taken count overflowed leading to an incorrect trip count 2782 // of zero. In this case we will also jump to the scalar loop. 2783 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2784 : ICmpInst::ICMP_ULT; 2785 2786 // If tail is to be folded, vector loop takes care of all iterations. 2787 Value *CheckMinIters = Builder.getFalse(); 2788 if (!Cost->foldTailByMasking()) 2789 CheckMinIters = Builder.CreateICmp( 2790 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2791 "min.iters.check"); 2792 2793 // Create new preheader for vector loop. 2794 LoopVectorPreHeader = 2795 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2796 "vector.ph"); 2797 2798 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2799 DT->getNode(Bypass)->getIDom()) && 2800 "TC check is expected to dominate Bypass"); 2801 2802 // Update dominator for Bypass & LoopExit. 2803 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2804 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2805 2806 ReplaceInstWithInst( 2807 TCCheckBlock->getTerminator(), 2808 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 2809 LoopBypassBlocks.push_back(TCCheckBlock); 2810 } 2811 2812 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2813 // Reuse existing vector loop preheader for SCEV checks. 2814 // Note that new preheader block is generated for vector loop. 2815 BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader; 2816 2817 // Generate the code to check that the SCEV assumptions that we made. 2818 // We want the new basic block to start at the first instruction in a 2819 // sequence of instructions that form a check. 
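// (Editorial note added for clarity, not part of the original source.) The
// expanded predicate is a single boolean that is true when any SCEV assumption
// fails at runtime, for example an overflow check for an induction variable
// that was assumed not to wrap; a true result takes the bypass edge to the
// scalar loop created below.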
2820 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2821 "scev.check"); 2822 Value *SCEVCheck = Exp.expandCodeForPredicate( 2823 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator()); 2824 2825 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2826 if (C->isZero()) 2827 return; 2828 2829 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 2830 (OptForSizeBasedOnProfile && 2831 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 2832 "Cannot SCEV check stride or overflow when optimizing for size"); 2833 2834 SCEVCheckBlock->setName("vector.scevcheck"); 2835 // Create new preheader for vector loop. 2836 LoopVectorPreHeader = 2837 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI, 2838 nullptr, "vector.ph"); 2839 2840 // Update dominator only if this is first RT check. 2841 if (LoopBypassBlocks.empty()) { 2842 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 2843 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 2844 } 2845 2846 ReplaceInstWithInst( 2847 SCEVCheckBlock->getTerminator(), 2848 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck)); 2849 LoopBypassBlocks.push_back(SCEVCheckBlock); 2850 AddedSafetyChecks = true; 2851 } 2852 2853 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2854 // VPlan-native path does not do any analysis for runtime checks currently. 2855 if (EnableVPlanNativePath) 2856 return; 2857 2858 // Reuse existing vector loop preheader for runtime memory checks. 2859 // Note that new preheader block is generated for vector loop. 2860 BasicBlock *const MemCheckBlock = L->getLoopPreheader(); 2861 2862 // Generate the code that checks in runtime if arrays overlap. We put the 2863 // checks into a separate block to make the more common case of few elements 2864 // faster. 2865 auto *LAI = Legal->getLAI(); 2866 const auto &RtPtrChecking = *LAI->getRuntimePointerChecking(); 2867 if (!RtPtrChecking.Need) 2868 return; 2869 Instruction *FirstCheckInst; 2870 Instruction *MemRuntimeCheck; 2871 std::tie(FirstCheckInst, MemRuntimeCheck) = 2872 addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop, 2873 RtPtrChecking.getChecks(), RtPtrChecking.getSE()); 2874 assert(MemRuntimeCheck && "no RT checks generated although RtPtrChecking " 2875 "claimed checks are required"); 2876 2877 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 2878 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 2879 "Cannot emit memory checks when optimizing for size, unless forced " 2880 "to vectorize."); 2881 ORE->emit([&]() { 2882 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 2883 L->getStartLoc(), L->getHeader()) 2884 << "Code-size may be reduced by not forcing " 2885 "vectorization, or by source-code modifications " 2886 "eliminating the need for runtime checks " 2887 "(e.g., adding 'restrict')."; 2888 }); 2889 } 2890 2891 MemCheckBlock->setName("vector.memcheck"); 2892 // Create new preheader for vector loop. 2893 LoopVectorPreHeader = 2894 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr, 2895 "vector.ph"); 2896 2897 // Update dominator only if this is first RT check. 
2898 if (LoopBypassBlocks.empty()) { 2899 DT->changeImmediateDominator(Bypass, MemCheckBlock); 2900 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock); 2901 } 2902 2903 ReplaceInstWithInst( 2904 MemCheckBlock->getTerminator(), 2905 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck)); 2906 LoopBypassBlocks.push_back(MemCheckBlock); 2907 AddedSafetyChecks = true; 2908 2909 // We currently don't use LoopVersioning for the actual loop cloning but we 2910 // still use it to add the noalias metadata. 2911 LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2912 PSE.getSE()); 2913 LVer->prepareNoAliasMetadata(); 2914 } 2915 2916 Value *InnerLoopVectorizer::emitTransformedIndex( 2917 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2918 const InductionDescriptor &ID) const { 2919 2920 SCEVExpander Exp(*SE, DL, "induction"); 2921 auto Step = ID.getStep(); 2922 auto StartValue = ID.getStartValue(); 2923 assert(Index->getType() == Step->getType() && 2924 "Index type does not match StepValue type"); 2925 2926 // Note: the IR at this point is broken. We cannot use SE to create any new 2927 // SCEV and then expand it, hoping that SCEV's simplification will give us 2928 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2929 // lead to various SCEV crashes. So all we can do is to use builder and rely 2930 // on InstCombine for future simplifications. Here we handle some trivial 2931 // cases only. 2932 auto CreateAdd = [&B](Value *X, Value *Y) { 2933 assert(X->getType() == Y->getType() && "Types don't match!"); 2934 if (auto *CX = dyn_cast<ConstantInt>(X)) 2935 if (CX->isZero()) 2936 return Y; 2937 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2938 if (CY->isZero()) 2939 return X; 2940 return B.CreateAdd(X, Y); 2941 }; 2942 2943 auto CreateMul = [&B](Value *X, Value *Y) { 2944 assert(X->getType() == Y->getType() && "Types don't match!"); 2945 if (auto *CX = dyn_cast<ConstantInt>(X)) 2946 if (CX->isOne()) 2947 return Y; 2948 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2949 if (CY->isOne()) 2950 return X; 2951 return B.CreateMul(X, Y); 2952 }; 2953 2954 // Get a suitable insert point for SCEV expansion. For blocks in the vector 2955 // loop, choose the end of the vector loop header (=LoopVectorBody), because 2956 // the DomTree is not kept up-to-date for additional blocks generated in the 2957 // vector loop. By using the header as insertion point, we guarantee that the 2958 // expanded instructions dominate all their uses. 
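// (Editorial note added for clarity; shorthand, not code from this file.) For
// an induction described as {Start,+,Step}, the transformed index computed in
// the switch below is roughly:
//   integer induction: Start + Index * Step
//   pointer induction: &Start[Index * Step]
//   FP induction:      Start fadd/fsub (Step * Index)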
2959 auto GetInsertPoint = [this, &B]() { 2960 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 2961 if (InsertBB != LoopVectorBody && 2962 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 2963 return LoopVectorBody->getTerminator(); 2964 return &*B.GetInsertPoint(); 2965 }; 2966 switch (ID.getKind()) { 2967 case InductionDescriptor::IK_IntInduction: { 2968 assert(Index->getType() == StartValue->getType() && 2969 "Index type does not match StartValue type"); 2970 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2971 return B.CreateSub(StartValue, Index); 2972 auto *Offset = CreateMul( 2973 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 2974 return CreateAdd(StartValue, Offset); 2975 } 2976 case InductionDescriptor::IK_PtrInduction: { 2977 assert(isa<SCEVConstant>(Step) && 2978 "Expected constant step for pointer induction"); 2979 return B.CreateGEP( 2980 StartValue->getType()->getPointerElementType(), StartValue, 2981 CreateMul(Index, 2982 Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()))); 2983 } 2984 case InductionDescriptor::IK_FpInduction: { 2985 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2986 auto InductionBinOp = ID.getInductionBinOp(); 2987 assert(InductionBinOp && 2988 (InductionBinOp->getOpcode() == Instruction::FAdd || 2989 InductionBinOp->getOpcode() == Instruction::FSub) && 2990 "Original bin op should be defined for FP induction"); 2991 2992 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2993 2994 // Floating point operations had to be 'fast' to enable the induction. 2995 FastMathFlags Flags; 2996 Flags.setFast(); 2997 2998 Value *MulExp = B.CreateFMul(StepValue, Index); 2999 if (isa<Instruction>(MulExp)) 3000 // We have to check, the MulExp may be a constant. 3001 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 3002 3003 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3004 "induction"); 3005 if (isa<Instruction>(BOp)) 3006 cast<Instruction>(BOp)->setFastMathFlags(Flags); 3007 3008 return BOp; 3009 } 3010 case InductionDescriptor::IK_NoInduction: 3011 return nullptr; 3012 } 3013 llvm_unreachable("invalid enum"); 3014 } 3015 3016 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3017 LoopScalarBody = OrigLoop->getHeader(); 3018 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3019 LoopExitBlock = OrigLoop->getExitBlock(); 3020 assert(LoopExitBlock && "Must have an exit block"); 3021 assert(LoopVectorPreHeader && "Invalid loop structure"); 3022 3023 LoopMiddleBlock = 3024 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3025 LI, nullptr, Twine(Prefix) + "middle.block"); 3026 LoopScalarPreHeader = 3027 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3028 nullptr, Twine(Prefix) + "scalar.ph"); 3029 // We intentionally don't let SplitBlock to update LoopInfo since 3030 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3031 // LoopVectorBody is explicitly added to the correct place few lines later. 3032 LoopVectorBody = 3033 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3034 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3035 3036 // Update dominator for loop exit. 3037 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3038 3039 // Create and register the new vector loop. 
3040 Loop *Lp = LI->AllocateLoop(); 3041 Loop *ParentLoop = OrigLoop->getParentLoop(); 3042 3043 // Insert the new loop into the loop nest and register the new basic blocks 3044 // before calling any utilities such as SCEV that require valid LoopInfo. 3045 if (ParentLoop) { 3046 ParentLoop->addChildLoop(Lp); 3047 } else { 3048 LI->addTopLevelLoop(Lp); 3049 } 3050 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3051 return Lp; 3052 } 3053 3054 void InnerLoopVectorizer::createInductionResumeValues(Loop *L, 3055 Value *VectorTripCount) { 3056 assert(VectorTripCount && L && "Expected valid arguments"); 3057 // We are going to resume the execution of the scalar loop. 3058 // Go over all of the induction variables that we found and fix the 3059 // PHIs that are left in the scalar version of the loop. 3060 // The starting values of PHI nodes depend on the counter of the last 3061 // iteration in the vectorized loop. 3062 // If we come from a bypass edge then we need to start from the original 3063 // start value. 3064 for (auto &InductionEntry : Legal->getInductionVars()) { 3065 PHINode *OrigPhi = InductionEntry.first; 3066 InductionDescriptor II = InductionEntry.second; 3067 3068 // Create phi nodes to merge from the backedge-taken check block. 3069 PHINode *BCResumeVal = 3070 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3071 LoopScalarPreHeader->getTerminator()); 3072 // Copy original phi DL over to the new one. 3073 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3074 Value *&EndValue = IVEndValues[OrigPhi]; 3075 if (OrigPhi == OldInduction) { 3076 // We know what the end value is. 3077 EndValue = VectorTripCount; 3078 } else { 3079 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3080 Type *StepType = II.getStep()->getType(); 3081 Instruction::CastOps CastOp = 3082 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3083 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3084 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3085 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3086 EndValue->setName("ind.end"); 3087 } 3088 3089 // The new PHI merges the original incoming value, in case of a bypass, 3090 // or the value at the end of the vectorized loop. 3091 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3092 3093 // Fix the scalar body counter (PHI node). 3094 // The old induction's phi node in the scalar body needs the truncated 3095 // value. 3096 for (BasicBlock *BB : LoopBypassBlocks) 3097 BCResumeVal->addIncoming(II.getStartValue(), BB); 3098 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3099 } 3100 } 3101 3102 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3103 MDNode *OrigLoopID) { 3104 assert(L && "Expected valid loop."); 3105 3106 // The trip counts should be cached by now. 3107 Value *Count = getOrCreateTripCount(L); 3108 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3109 3110 // We need the OrigLoop (scalar loop part) latch terminator to help 3111 // produce correct debug info for the middle block BB instructions. 3112 // The legality check stage guarantees that the loop will have a single 3113 // latch. 
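// (Editorial note added for clarity; shorthand, not code from this file.) The
// middle block completed below ends in roughly:
//   %cmp.n = icmp eq i64 %trip.count, %n.vec
//   br i1 %cmp.n, label %exit.block, label %scalar.ph
// so the scalar remainder is skipped when the vector loop covered all
// iterations; with tail folding CmpN is simply 'true' and the remainder is
// always skipped.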
3114 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) && 3115 "Scalar loop latch terminator isn't a branch"); 3116 BranchInst *ScalarLatchBr = 3117 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()); 3118 3119 // Add a check in the middle block to see if we have completed 3120 // all of the iterations in the first vector loop. 3121 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3122 // If tail is to be folded, we know we don't need to run the remainder. 3123 Value *CmpN = Builder.getTrue(); 3124 if (!Cost->foldTailByMasking()) { 3125 CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3126 VectorTripCount, "cmp.n", 3127 LoopMiddleBlock->getTerminator()); 3128 3129 // Here we use the same DebugLoc as the scalar loop latch branch instead 3130 // of the corresponding compare because they may have ended up with 3131 // different line numbers and we want to avoid awkward line stepping while 3132 // debugging. Eg. if the compare has got a line number inside the loop. 3133 cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc()); 3134 } 3135 3136 BranchInst *BrInst = 3137 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN); 3138 BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc()); 3139 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3140 3141 // Get ready to start creating new instructions into the vectorized body. 3142 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3143 "Inconsistent vector loop preheader"); 3144 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3145 3146 Optional<MDNode *> VectorizedLoopID = 3147 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3148 LLVMLoopVectorizeFollowupVectorized}); 3149 if (VectorizedLoopID.hasValue()) { 3150 L->setLoopID(VectorizedLoopID.getValue()); 3151 3152 // Do not setAlreadyVectorized if loop attributes have been defined 3153 // explicitly. 3154 return LoopVectorPreHeader; 3155 } 3156 3157 // Keep all loop hints from the original loop on the vector loop (we'll 3158 // replace the vectorizer-specific hints below). 3159 if (MDNode *LID = OrigLoop->getLoopID()) 3160 L->setLoopID(LID); 3161 3162 LoopVectorizeHints Hints(L, true, *ORE); 3163 Hints.setAlreadyVectorized(); 3164 3165 #ifdef EXPENSIVE_CHECKS 3166 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3167 LI->verify(*DT); 3168 #endif 3169 3170 return LoopVectorPreHeader; 3171 } 3172 3173 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3174 /* 3175 In this function we generate a new loop. The new loop will contain 3176 the vectorized instructions while the old loop will continue to run the 3177 scalar remainder. 3178 3179 [ ] <-- loop iteration number check. 3180 / | 3181 / v 3182 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3183 | / | 3184 | / v 3185 || [ ] <-- vector pre header. 3186 |/ | 3187 | v 3188 | [ ] \ 3189 | [ ]_| <-- vector loop. 3190 | | 3191 | v 3192 | -[ ] <--- middle-block. 3193 | / | 3194 | / v 3195 -|- >[ ] <--- new preheader. 3196 | | 3197 | v 3198 | [ ] \ 3199 | [ ]_| <-- old scalar loop to handle remainder. 3200 \ | 3201 \ v 3202 >[ ] <-- exit block. 3203 ... 3204 */ 3205 3206 // Get the metadata of the original loop before it gets modified. 3207 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3208 3209 // Create an empty vector loop, and prepare basic blocks for the runtime 3210 // checks. 3211 Loop *Lp = createVectorLoopSkeleton(""); 3212 3213 // Now, compare the new count to zero. 
If it is zero, skip the vector loop and
3214 // jump to the scalar loop. This check also covers the case where the
3215 // backedge-taken count is uint##_max: adding one to it will overflow leading
3216 // to an incorrect trip count of zero. In this (rare) case we will also jump
3217 // to the scalar loop.
3218 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3219
3220 // Generate the code to check any assumptions that we've made for SCEV
3221 // expressions.
3222 emitSCEVChecks(Lp, LoopScalarPreHeader);
3223
3224 // Generate the code that checks in runtime if arrays overlap. We put the
3225 // checks into a separate block to make the more common case of few elements
3226 // faster.
3227 emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3228
3229 // Some loops have a single integer induction variable, while other loops
3230 // don't. One example is C++ iterators that often have multiple pointer
3231 // induction variables. In the code below we also support a case where we
3232 // don't have a single induction variable.
3233 //
3234 // We try to obtain an induction variable from the original loop as hard
3235 // as possible. However, if we don't find one that:
3236 // - is an integer
3237 // - counts from zero, stepping by one
3238 // - is the size of the widest induction variable type
3239 // then we create a new one.
3240 OldInduction = Legal->getPrimaryInduction();
3241 Type *IdxTy = Legal->getWidestInductionType();
3242 Value *StartIdx = ConstantInt::get(IdxTy, 0);
3243 // The loop step is equal to the vectorization factor (num of SIMD elements)
3244 // times the unroll factor (num of SIMD instructions).
3245 Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3246 Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3247 Induction =
3248 createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3249 getDebugLocFromInstOrOperands(OldInduction));
3250
3251 // Emit phis for the new starting index of the scalar loop.
3252 createInductionResumeValues(Lp, CountRoundDown);
3253
3254 return completeLoopSkeleton(Lp, OrigLoopID);
3255 }
3256
3257 // Fix up external users of the induction variable. At this point, we are
3258 // in LCSSA form, with all external PHIs that use the IV having one input value,
3259 // coming from the remainder loop. We need those PHIs to also have a correct
3260 // value for the IV when arriving directly from the middle block.
3261 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3262 const InductionDescriptor &II,
3263 Value *CountRoundDown, Value *EndValue,
3264 BasicBlock *MiddleBlock) {
3265 // There are two kinds of external IV usages - those that use the value
3266 // computed in the last iteration (the PHI) and those that use the penultimate
3267 // value (the value that feeds into the phi from the loop latch).
3268 // We allow both, but they, obviously, have different values.
3269
3270 assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3271
3272 DenseMap<Value *, Value *> MissingVals;
3273
3274 // An external user of the last iteration's value should see the value that
3275 // the remainder loop uses to initialize its own IV.
3276 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3277 for (User *U : PostInc->users()) {
3278 Instruction *UI = cast<Instruction>(U);
3279 if (!OrigLoop->contains(UI)) {
3280 assert(isa<PHINode>(UI) && "Expected LCSSA form");
3281 MissingVals[UI] = EndValue;
3282 }
3283 }
3284
3285 // An external user of the penultimate value needs to see EndValue - Step.
3286 // The simplest way to get this is to recompute it from the constituent SCEVs, 3287 // that is Start + (Step * (CRD - 1)). 3288 for (User *U : OrigPhi->users()) { 3289 auto *UI = cast<Instruction>(U); 3290 if (!OrigLoop->contains(UI)) { 3291 const DataLayout &DL = 3292 OrigLoop->getHeader()->getModule()->getDataLayout(); 3293 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3294 3295 IRBuilder<> B(MiddleBlock->getTerminator()); 3296 Value *CountMinusOne = B.CreateSub( 3297 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3298 Value *CMO = 3299 !II.getStep()->getType()->isIntegerTy() 3300 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3301 II.getStep()->getType()) 3302 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3303 CMO->setName("cast.cmo"); 3304 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3305 Escape->setName("ind.escape"); 3306 MissingVals[UI] = Escape; 3307 } 3308 } 3309 3310 for (auto &I : MissingVals) { 3311 PHINode *PHI = cast<PHINode>(I.first); 3312 // One corner case we have to handle is two IVs "chasing" each-other, 3313 // that is %IV2 = phi [...], [ %IV1, %latch ] 3314 // In this case, if IV1 has an external use, we need to avoid adding both 3315 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3316 // don't already have an incoming value for the middle block. 3317 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3318 PHI->addIncoming(I.second, MiddleBlock); 3319 } 3320 } 3321 3322 namespace { 3323 3324 struct CSEDenseMapInfo { 3325 static bool canHandle(const Instruction *I) { 3326 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3327 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3328 } 3329 3330 static inline Instruction *getEmptyKey() { 3331 return DenseMapInfo<Instruction *>::getEmptyKey(); 3332 } 3333 3334 static inline Instruction *getTombstoneKey() { 3335 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3336 } 3337 3338 static unsigned getHashValue(const Instruction *I) { 3339 assert(canHandle(I) && "Unknown instruction!"); 3340 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3341 I->value_op_end())); 3342 } 3343 3344 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3345 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3346 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3347 return LHS == RHS; 3348 return LHS->isIdenticalTo(RHS); 3349 } 3350 }; 3351 3352 } // end anonymous namespace 3353 3354 ///Perform cse of induction variable instructions. 3355 static void cse(BasicBlock *BB) { 3356 // Perform simple cse. 3357 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3358 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3359 Instruction *In = &*I++; 3360 3361 if (!CSEDenseMapInfo::canHandle(In)) 3362 continue; 3363 3364 // Check if we can replace this instruction with any of the 3365 // visited instructions. 
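// (Editorial note added for clarity, not part of the original source.) For
// example, two identical 'extractelement' or 'getelementptr' instructions that
// were emitted separately during widening hash to the same key, so the later
// one is replaced by the earlier one and erased here.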
3366 if (Instruction *V = CSEMap.lookup(In)) { 3367 In->replaceAllUsesWith(V); 3368 In->eraseFromParent(); 3369 continue; 3370 } 3371 3372 CSEMap[In] = In; 3373 } 3374 } 3375 3376 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, 3377 unsigned VF, 3378 bool &NeedToScalarize) { 3379 Function *F = CI->getCalledFunction(); 3380 Type *ScalarRetTy = CI->getType(); 3381 SmallVector<Type *, 4> Tys, ScalarTys; 3382 for (auto &ArgOp : CI->arg_operands()) 3383 ScalarTys.push_back(ArgOp->getType()); 3384 3385 // Estimate cost of scalarized vector call. The source operands are assumed 3386 // to be vectors, so we need to extract individual elements from there, 3387 // execute VF scalar calls, and then gather the result into the vector return 3388 // value. 3389 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, 3390 TTI::TCK_RecipThroughput); 3391 if (VF == 1) 3392 return ScalarCallCost; 3393 3394 // Compute corresponding vector type for return value and arguments. 3395 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3396 for (Type *ScalarTy : ScalarTys) 3397 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3398 3399 // Compute costs of unpacking argument values for the scalar calls and 3400 // packing the return values to a vector. 3401 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF); 3402 3403 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3404 3405 // If we can't emit a vector call for this function, then the currently found 3406 // cost is the cost we need to return. 3407 NeedToScalarize = true; 3408 VFShape Shape = 3409 VFShape::get(*CI, ElementCount::getFixed(VF), false /*HasGlobalPred*/); 3410 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3411 3412 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3413 return Cost; 3414 3415 // If the corresponding vector cost is cheaper, return its cost. 3416 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, 3417 TTI::TCK_RecipThroughput); 3418 if (VectorCallCost < Cost) { 3419 NeedToScalarize = false; 3420 return VectorCallCost; 3421 } 3422 return Cost; 3423 } 3424 3425 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3426 unsigned VF) { 3427 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3428 assert(ID && "Expected intrinsic call!"); 3429 3430 IntrinsicCostAttributes CostAttrs(ID, *CI, VF); 3431 return TTI.getIntrinsicInstrCost(CostAttrs, 3432 TargetTransformInfo::TCK_RecipThroughput); 3433 } 3434 3435 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3436 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3437 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3438 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3439 } 3440 3441 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3442 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3443 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3444 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3445 } 3446 3447 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3448 // For every instruction `I` in MinBWs, truncate the operands, create a 3449 // truncated version of `I` and reextend its result. InstCombine runs 3450 // later and will remove any ext/trunc pairs. 3451 SmallPtrSet<Value *, 4> Erased; 3452 for (const auto &KV : Cost->getMinimalBitwidths()) { 3453 // If the value wasn't vectorized, we must maintain the original scalar 3454 // type. 
The absence of the value from VectorLoopValueMap indicates that it 3455 // wasn't vectorized. 3456 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3457 continue; 3458 for (unsigned Part = 0; Part < UF; ++Part) { 3459 Value *I = getOrCreateVectorValue(KV.first, Part); 3460 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3461 continue; 3462 Type *OriginalTy = I->getType(); 3463 Type *ScalarTruncatedTy = 3464 IntegerType::get(OriginalTy->getContext(), KV.second); 3465 auto *TruncatedTy = FixedVectorType::get( 3466 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements()); 3467 if (TruncatedTy == OriginalTy) 3468 continue; 3469 3470 IRBuilder<> B(cast<Instruction>(I)); 3471 auto ShrinkOperand = [&](Value *V) -> Value * { 3472 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3473 if (ZI->getSrcTy() == TruncatedTy) 3474 return ZI->getOperand(0); 3475 return B.CreateZExtOrTrunc(V, TruncatedTy); 3476 }; 3477 3478 // The actual instruction modification depends on the instruction type, 3479 // unfortunately. 3480 Value *NewI = nullptr; 3481 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3482 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3483 ShrinkOperand(BO->getOperand(1))); 3484 3485 // Any wrapping introduced by shrinking this operation shouldn't be 3486 // considered undefined behavior. So, we can't unconditionally copy 3487 // arithmetic wrapping flags to NewI. 3488 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3489 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3490 NewI = 3491 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3492 ShrinkOperand(CI->getOperand(1))); 3493 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3494 NewI = B.CreateSelect(SI->getCondition(), 3495 ShrinkOperand(SI->getTrueValue()), 3496 ShrinkOperand(SI->getFalseValue())); 3497 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3498 switch (CI->getOpcode()) { 3499 default: 3500 llvm_unreachable("Unhandled cast!"); 3501 case Instruction::Trunc: 3502 NewI = ShrinkOperand(CI->getOperand(0)); 3503 break; 3504 case Instruction::SExt: 3505 NewI = B.CreateSExtOrTrunc( 3506 CI->getOperand(0), 3507 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3508 break; 3509 case Instruction::ZExt: 3510 NewI = B.CreateZExtOrTrunc( 3511 CI->getOperand(0), 3512 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3513 break; 3514 } 3515 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3516 auto Elements0 = 3517 cast<VectorType>(SI->getOperand(0)->getType())->getNumElements(); 3518 auto *O0 = B.CreateZExtOrTrunc( 3519 SI->getOperand(0), 3520 FixedVectorType::get(ScalarTruncatedTy, Elements0)); 3521 auto Elements1 = 3522 cast<VectorType>(SI->getOperand(1)->getType())->getNumElements(); 3523 auto *O1 = B.CreateZExtOrTrunc( 3524 SI->getOperand(1), 3525 FixedVectorType::get(ScalarTruncatedTy, Elements1)); 3526 3527 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3528 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3529 // Don't do anything with the operands, just extend the result. 
3530 continue; 3531 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3532 auto Elements = 3533 cast<VectorType>(IE->getOperand(0)->getType())->getNumElements(); 3534 auto *O0 = B.CreateZExtOrTrunc( 3535 IE->getOperand(0), 3536 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3537 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3538 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3539 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3540 auto Elements = 3541 cast<VectorType>(EE->getOperand(0)->getType())->getNumElements(); 3542 auto *O0 = B.CreateZExtOrTrunc( 3543 EE->getOperand(0), 3544 FixedVectorType::get(ScalarTruncatedTy, Elements)); 3545 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3546 } else { 3547 // If we don't know what to do, be conservative and don't do anything. 3548 continue; 3549 } 3550 3551 // Lastly, extend the result. 3552 NewI->takeName(cast<Instruction>(I)); 3553 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3554 I->replaceAllUsesWith(Res); 3555 cast<Instruction>(I)->eraseFromParent(); 3556 Erased.insert(I); 3557 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3558 } 3559 } 3560 3561 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3562 for (const auto &KV : Cost->getMinimalBitwidths()) { 3563 // If the value wasn't vectorized, we must maintain the original scalar 3564 // type. The absence of the value from VectorLoopValueMap indicates that it 3565 // wasn't vectorized. 3566 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3567 continue; 3568 for (unsigned Part = 0; Part < UF; ++Part) { 3569 Value *I = getOrCreateVectorValue(KV.first, Part); 3570 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3571 if (Inst && Inst->use_empty()) { 3572 Value *NewI = Inst->getOperand(0); 3573 Inst->eraseFromParent(); 3574 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3575 } 3576 } 3577 } 3578 } 3579 3580 void InnerLoopVectorizer::fixVectorizedLoop() { 3581 // Insert truncates and extends for any truncated instructions as hints to 3582 // InstCombine. 3583 if (VF > 1) 3584 truncateToMinimalBitwidths(); 3585 3586 // Fix widened non-induction PHIs by setting up the PHI operands. 3587 if (OrigPHIsToFix.size()) { 3588 assert(EnableVPlanNativePath && 3589 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3590 fixNonInductionPHIs(); 3591 } 3592 3593 // At this point every instruction in the original loop is widened to a 3594 // vector form. Now we need to fix the recurrences in the loop. These PHI 3595 // nodes are currently empty because we did not want to introduce cycles. 3596 // This is the second stage of vectorizing recurrences. 3597 fixCrossIterationPHIs(); 3598 3599 // Forget the original basic block. 3600 PSE.getSE()->forgetLoop(OrigLoop); 3601 3602 // Fix-up external users of the induction variables. 3603 for (auto &Entry : Legal->getInductionVars()) 3604 fixupIVUsers(Entry.first, Entry.second, 3605 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3606 IVEndValues[Entry.first], LoopMiddleBlock); 3607 3608 fixLCSSAPHIs(); 3609 for (Instruction *PI : PredicatedInstructions) 3610 sinkScalarOperands(&*PI); 3611 3612 // Remove redundant induction instructions. 3613 cse(LoopVectorBody); 3614 3615 // Set/update profile weights for the vector and remainder loops as original 3616 // loop iterations are now distributed among them. Note that original loop 3617 // represented by LoopScalarBody becomes remainder loop after vectorization. 
3618 //
3619 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3620 // end up getting a slightly roughened result but that should be OK since
3621 // profile is not inherently precise anyway. Note also that a possible bypass of
3622 // vector code caused by legality checks is ignored, assigning all the weight
3623 // to the vector loop, optimistically.
3624 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3625 LI->getLoopFor(LoopVectorBody),
3626 LI->getLoopFor(LoopScalarBody), VF * UF);
3627 }
3628
3629 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3630 // In order to support recurrences we need to be able to vectorize Phi nodes.
3631 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3632 // stage #2: We now need to fix the recurrences by adding incoming edges to
3633 // the currently empty PHI nodes. At this point every instruction in the
3634 // original loop is widened to a vector form so we can use them to construct
3635 // the incoming edges.
3636 for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3637 // Handle first-order recurrences and reductions that need to be fixed.
3638 if (Legal->isFirstOrderRecurrence(&Phi))
3639 fixFirstOrderRecurrence(&Phi);
3640 else if (Legal->isReductionVariable(&Phi))
3641 fixReduction(&Phi);
3642 }
3643 }
3644
3645 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3646 // This is the second phase of vectorizing first-order recurrences. An
3647 // overview of the transformation is described below. Suppose we have the
3648 // following loop.
3649 //
3650 // for (int i = 0; i < n; ++i)
3651 // b[i] = a[i] - a[i - 1];
3652 //
3653 // There is a first-order recurrence on "a". For this loop, the shorthand
3654 // scalar IR looks like:
3655 //
3656 // scalar.ph:
3657 // s_init = a[-1]
3658 // br scalar.body
3659 //
3660 // scalar.body:
3661 // i = phi [0, scalar.ph], [i+1, scalar.body]
3662 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3663 // s2 = a[i]
3664 // b[i] = s2 - s1
3665 // br cond, scalar.body, ...
3666 //
3667 // In this example, s1 is a recurrence because its value depends on the
3668 // previous iteration. In the first phase of vectorization, we created a
3669 // temporary value for s1. We now complete the vectorization and produce the
3670 // shorthand vector IR shown below (for VF = 4, UF = 1).
3671 //
3672 // vector.ph:
3673 // v_init = vector(..., ..., ..., a[-1])
3674 // br vector.body
3675 //
3676 // vector.body
3677 // i = phi [0, vector.ph], [i+4, vector.body]
3678 // v1 = phi [v_init, vector.ph], [v2, vector.body]
3679 // v2 = a[i, i+1, i+2, i+3];
3680 // v3 = vector(v1(3), v2(0, 1, 2))
3681 // b[i, i+1, i+2, i+3] = v2 - v3
3682 // br cond, vector.body, middle.block
3683 //
3684 // middle.block:
3685 // x = v2(3)
3686 // br scalar.ph
3687 //
3688 // scalar.ph:
3689 // s_init = phi [x, middle.block], [a[-1], otherwise]
3690 // br scalar.body
3691 //
3692 // After the vector loop finishes executing, we extract the next value of
3693 // the recurrence (x) to use as the initial value in the scalar loop.
3694
3695 // Get the original loop preheader and single loop latch.
3696 auto *Preheader = OrigLoop->getLoopPreheader();
3697 auto *Latch = OrigLoop->getLoopLatch();
3698
3699 // Get the initial and previous values of the scalar recurrence.
3700 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3701 auto *Previous = Phi->getIncomingValueForBlock(Latch);
3702
3703 // Create a vector from the initial value.
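// (Editorial note added for clarity; shorthand, not code from this file.) For
// VF = 4 the code below produces roughly:
//   %vector.recur.init = insertelement <4 x ty> undef, ty %s_init, i32 3
// i.e. the scalar initial value is placed in the last lane, matching the
// v_init = vector(..., ..., ..., a[-1]) form in the sketch above.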
3704 auto *VectorInit = ScalarInit; 3705 if (VF > 1) { 3706 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3707 VectorInit = Builder.CreateInsertElement( 3708 UndefValue::get(FixedVectorType::get(VectorInit->getType(), VF)), 3709 VectorInit, Builder.getInt32(VF - 1), "vector.recur.init"); 3710 } 3711 3712 // We constructed a temporary phi node in the first phase of vectorization. 3713 // This phi node will eventually be deleted. 3714 Builder.SetInsertPoint( 3715 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 3716 3717 // Create a phi node for the new recurrence. The current value will either be 3718 // the initial value inserted into a vector or loop-varying vector value. 3719 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3720 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3721 3722 // Get the vectorized previous value of the last part UF - 1. It appears last 3723 // among all unrolled iterations, due to the order of their construction. 3724 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 3725 3726 // Find and set the insertion point after the previous value if it is an 3727 // instruction. 3728 BasicBlock::iterator InsertPt; 3729 // Note that the previous value may have been constant-folded so it is not 3730 // guaranteed to be an instruction in the vector loop. 3731 // FIXME: Loop invariant values do not form recurrences. We should deal with 3732 // them earlier. 3733 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 3734 InsertPt = LoopVectorBody->getFirstInsertionPt(); 3735 else { 3736 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 3737 if (isa<PHINode>(PreviousLastPart)) 3738 // If the previous value is a phi node, we should insert after all the phi 3739 // nodes in the block containing the PHI to avoid breaking basic block 3740 // verification. Note that the basic block may be different to 3741 // LoopVectorBody, in case we predicate the loop. 3742 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 3743 else 3744 InsertPt = ++PreviousInst->getIterator(); 3745 } 3746 Builder.SetInsertPoint(&*InsertPt); 3747 3748 // We will construct a vector for the recurrence by combining the values for 3749 // the current and previous iterations. This is the required shuffle mask. 3750 SmallVector<int, 8> ShuffleMask(VF); 3751 ShuffleMask[0] = VF - 1; 3752 for (unsigned I = 1; I < VF; ++I) 3753 ShuffleMask[I] = I + VF - 1; 3754 3755 // The vector from which to take the initial value for the current iteration 3756 // (actual or unrolled). Initially, this is the vector phi node. 3757 Value *Incoming = VecPhi; 3758 3759 // Shuffle the current and previous vector and update the vector parts. 3760 for (unsigned Part = 0; Part < UF; ++Part) { 3761 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3762 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3763 auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3764 ShuffleMask) 3765 : Incoming; 3766 PhiPart->replaceAllUsesWith(Shuffle); 3767 cast<Instruction>(PhiPart)->eraseFromParent(); 3768 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3769 Incoming = PreviousPart; 3770 } 3771 3772 // Fix the latch value of the new recurrence in the vector loop. 3773 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3774 3775 // Extract the last vector element in the middle block. 
This will be the
3776 // initial value for the recurrence when jumping to the scalar loop.
3777 auto *ExtractForScalar = Incoming;
3778 if (VF > 1) {
3779 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
3780 ExtractForScalar = Builder.CreateExtractElement(
3781 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
3782 }
3783 // Extract the second last element in the middle block if the
3784 // Phi is used outside the loop. We need to extract the phi itself
3785 // and not the last element (the phi update in the current iteration). This
3786 // will be the value when jumping to the exit block from the LoopMiddleBlock,
3787 // when the scalar loop is not run at all.
3788 Value *ExtractForPhiUsedOutsideLoop = nullptr;
3789 if (VF > 1)
3790 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
3791 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
3792 // When the loop is unrolled without vectorizing, initialize
3793 // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value of
3794 // `Incoming`. This is analogous to the vectorized case above: extracting the
3795 // second last element when VF > 1.
3796 else if (UF > 1)
3797 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
3798
3799 // Fix the initial value of the original recurrence in the scalar loop.
3800 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3801 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3802 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3803 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3804 Start->addIncoming(Incoming, BB);
3805 }
3806
3807 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3808 Phi->setName("scalar.recur");
3809
3810 // Finally, fix users of the recurrence outside the loop. The users will need
3811 // either the last value of the scalar recurrence or the last value of the
3812 // vector recurrence we extracted in the middle block. Since the loop is in
3813 // LCSSA form, we just need to find all the phi nodes for the original scalar
3814 // recurrence in the exit block, and then add an edge for the middle block.
3815 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3816 if (LCSSAPhi.getIncomingValue(0) == Phi) {
3817 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3818 }
3819 }
3820 }
3821
3822 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3823 Constant *Zero = Builder.getInt32(0);
3824
3825 // Get its reduction variable descriptor.
3826 assert(Legal->isReductionVariable(Phi) &&
3827 "Unable to find the reduction variable");
3828 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3829
3830 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3831 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3832 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3833 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3834 RdxDesc.getMinMaxRecurrenceKind();
3835 setDebugLocFromInst(Builder, ReductionStartValue);
3836 bool IsInLoopReductionPhi = Cost->isInLoopReduction(Phi);
3837
3838 // We need to generate a reduction vector from the incoming scalar.
3839 // To do so, we need to generate the 'identity' vector and override
3840 // one of the elements with the incoming scalar reduction. We need
3841 // to do it in the vector-loop preheader.
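// (Editorial note added for clarity; an illustrative example rather than code
// from this file.) For an integer add reduction with start value %s and VF = 4,
// the identity vector is <0, 0, 0, 0> and the vector start value is
// <%s, 0, 0, 0>; for min/max reductions the start value itself acts as the
// identity, so it is splatted across all lanes instead.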
3842 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3843
3844 // This is the vector-clone of the value that leaves the loop.
3845 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3846
3847 // Find the reduction identity variable. Zero for addition, or and xor;
3848 // one for multiplication; -1 for and.
3849 Value *Identity;
3850 Value *VectorStart;
3851 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3852 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3853 // MinMax reductions have the start value as their identity.
3854 if (VF == 1 || IsInLoopReductionPhi) {
3855 VectorStart = Identity = ReductionStartValue;
3856 } else {
3857 VectorStart = Identity =
3858 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3859 }
3860 } else {
3861 // Handle other reduction kinds:
3862 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3863 RK, VecTy->getScalarType());
3864 if (VF == 1 || IsInLoopReductionPhi) {
3865 Identity = Iden;
3866 // This vector is the Identity vector where the first element is the
3867 // incoming scalar reduction.
3868 VectorStart = ReductionStartValue;
3869 } else {
3870 Identity = ConstantVector::getSplat(ElementCount::getFixed(VF), Iden);
3871
3872 // This vector is the Identity vector where the first element is the
3873 // incoming scalar reduction.
3874 VectorStart =
3875 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3876 }
3877 }
3878
3879 // Wrap flags are in general invalid after vectorization; clear them.
3880 clearReductionWrapFlags(RdxDesc);
3881
3882 // Fix the vector-loop phi.
3883
3884 // Reductions do not have to start at zero. They can start with
3885 // any loop-invariant value.
3886 BasicBlock *Latch = OrigLoop->getLoopLatch();
3887 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3888
3889 for (unsigned Part = 0; Part < UF; ++Part) {
3890 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3891 Value *Val = getOrCreateVectorValue(LoopVal, Part);
3892 // Make sure to add the reduction start value only to the
3893 // first unroll part.
3894 Value *StartVal = (Part == 0) ? VectorStart : Identity;
3895 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
3896 cast<PHINode>(VecRdxPhi)
3897 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
3898 }
3899
3900 // Before each round, move the insertion point right between
3901 // the PHIs and the values we are going to write.
3902 // This allows us to write both PHINodes and the extractelement
3903 // instructions.
3904 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3905
3906 setDebugLocFromInst(Builder, LoopExitInst);
3907
3908 // If tail is folded by masking, the vector value to leave the loop should be
3909 // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
3910 // instead of the former.
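// (Editorial note added for clarity; shorthand, not code from this file.) With
// tail folding, each unroll part is expected to contain something like
//   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %vec.phi
// and the loop below records that select, instead of the plain exit value, as
// the part's value in VectorLoopValueMap.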
3911 if (Cost->foldTailByMasking()) { 3912 for (unsigned Part = 0; Part < UF; ++Part) { 3913 Value *VecLoopExitInst = 3914 VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3915 Value *Sel = nullptr; 3916 for (User *U : VecLoopExitInst->users()) { 3917 if (isa<SelectInst>(U)) { 3918 assert(!Sel && "Reduction exit feeding two selects"); 3919 Sel = U; 3920 } else 3921 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3922 } 3923 assert(Sel && "Reduction exit feeds no select"); 3924 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel); 3925 3926 // If the target can create a predicated operator for the reduction at no 3927 // extra cost in the loop (for example a predicated vadd), it can be 3928 // cheaper for the select to remain in the loop than be sunk out of it, 3929 // and so use the select value for the phi instead of the old 3930 // LoopExitValue. 3931 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 3932 if (PreferPredicatedReductionSelect) { 3933 auto *VecRdxPhi = cast<PHINode>(getOrCreateVectorValue(Phi, Part)); 3934 VecRdxPhi->setIncomingValueForBlock( 3935 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 3936 } 3937 } 3938 } 3939 3940 // If the vector reduction can be performed in a smaller type, we truncate 3941 // then extend the loop exit value to enable InstCombine to evaluate the 3942 // entire expression in the smaller type. 3943 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3944 assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!"); 3945 Type *RdxVecTy = FixedVectorType::get(RdxDesc.getRecurrenceType(), VF); 3946 Builder.SetInsertPoint( 3947 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3948 VectorParts RdxParts(UF); 3949 for (unsigned Part = 0; Part < UF; ++Part) { 3950 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3951 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3952 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3953 : Builder.CreateZExt(Trunc, VecTy); 3954 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3955 UI != RdxParts[Part]->user_end();) 3956 if (*UI != Trunc) { 3957 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3958 RdxParts[Part] = Extnd; 3959 } else { 3960 ++UI; 3961 } 3962 } 3963 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3964 for (unsigned Part = 0; Part < UF; ++Part) { 3965 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3966 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3967 } 3968 } 3969 3970 // Reduce all of the unrolled parts into a single vector. 3971 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3972 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3973 3974 // The middle block terminator has already been assigned a DebugLoc here (the 3975 // OrigLoop's single latch terminator). We want the whole middle block to 3976 // appear to execute on this line because: (a) it is all compiler generated, 3977 // (b) these instructions are always executed after evaluating the latch 3978 // conditional branch, and (c) other passes may add new predecessors which 3979 // terminate on this line. This is the easiest way to ensure we don't 3980 // accidentally cause an extra step back into the loop while debugging. 
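// (Editorial note added for clarity; an illustrative example rather than code
// from this file.) For an add reduction unrolled with UF = 3, the loop below
// combines the parts sequentially:
//   %bin.rdx  = add %rdx.part1, %rdx.part0
//   %bin.rdx1 = add %rdx.part2, %bin.rdx
// while min/max reductions use createMinMaxOp() rather than a plain binary
// operator.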
3981 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 3982 for (unsigned Part = 1; Part < UF; ++Part) { 3983 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3984 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3985 // Floating point operations had to be 'fast' to enable the reduction. 3986 ReducedPartRdx = addFastMathFlag( 3987 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3988 ReducedPartRdx, "bin.rdx"), 3989 RdxDesc.getFastMathFlags()); 3990 else 3991 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3992 RdxPart); 3993 } 3994 3995 // Create the reduction after the loop. Note that inloop reductions create the 3996 // target reduction in the loop using a Reduction recipe. 3997 if (VF > 1 && !IsInLoopReductionPhi) { 3998 bool NoNaN = Legal->hasFunNoNaNAttr(); 3999 ReducedPartRdx = 4000 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 4001 // If the reduction can be performed in a smaller type, we need to extend 4002 // the reduction to the wider type before we branch to the original loop. 4003 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4004 ReducedPartRdx = 4005 RdxDesc.isSigned() 4006 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4007 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4008 } 4009 4010 // Create a phi node that merges control-flow from the backedge-taken check 4011 // block and the middle block. 4012 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4013 LoopScalarPreHeader->getTerminator()); 4014 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4015 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4016 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4017 4018 // Now, we need to fix the users of the reduction variable 4019 // inside and outside of the scalar remainder loop. 4020 // We know that the loop is in LCSSA form. We need to update the 4021 // PHI nodes in the exit blocks. 4022 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4023 // All PHINodes need to have a single entry edge, or two if 4024 // we already fixed them. 4025 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 4026 4027 // We found a reduction value exit-PHI. Update it with the 4028 // incoming bypass edge. 4029 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 4030 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4031 } // end of the LCSSA phi scan. 4032 4033 // Fix the scalar loop reduction variable with the incoming reduction sum 4034 // from the vector body and from the backedge value. 4035 int IncomingEdgeBlockIdx = 4036 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4037 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4038 // Pick the other block. 4039 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 
0 : 1); 4040 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4041 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4042 } 4043 4044 void InnerLoopVectorizer::clearReductionWrapFlags( 4045 RecurrenceDescriptor &RdxDesc) { 4046 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind(); 4047 if (RK != RecurrenceDescriptor::RK_IntegerAdd && 4048 RK != RecurrenceDescriptor::RK_IntegerMult) 4049 return; 4050 4051 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4052 assert(LoopExitInstr && "null loop exit instruction"); 4053 SmallVector<Instruction *, 8> Worklist; 4054 SmallPtrSet<Instruction *, 8> Visited; 4055 Worklist.push_back(LoopExitInstr); 4056 Visited.insert(LoopExitInstr); 4057 4058 while (!Worklist.empty()) { 4059 Instruction *Cur = Worklist.pop_back_val(); 4060 if (isa<OverflowingBinaryOperator>(Cur)) 4061 for (unsigned Part = 0; Part < UF; ++Part) { 4062 Value *V = getOrCreateVectorValue(Cur, Part); 4063 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4064 } 4065 4066 for (User *U : Cur->users()) { 4067 Instruction *UI = cast<Instruction>(U); 4068 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4069 Visited.insert(UI).second) 4070 Worklist.push_back(UI); 4071 } 4072 } 4073 } 4074 4075 void InnerLoopVectorizer::fixLCSSAPHIs() { 4076 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4077 if (LCSSAPhi.getNumIncomingValues() == 1) { 4078 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4079 // Non-instruction incoming values will have only one value. 4080 unsigned LastLane = 0; 4081 if (isa<Instruction>(IncomingValue)) 4082 LastLane = Cost->isUniformAfterVectorization( 4083 cast<Instruction>(IncomingValue), VF) 4084 ? 0 4085 : VF - 1; 4086 // Can be a loop invariant incoming value or the last scalar value to be 4087 // extracted from the vectorized loop. 4088 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4089 Value *lastIncomingValue = 4090 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 4091 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4092 } 4093 } 4094 } 4095 4096 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4097 // The basic block and loop containing the predicated instruction. 4098 auto *PredBB = PredInst->getParent(); 4099 auto *VectorLoop = LI->getLoopFor(PredBB); 4100 4101 // Initialize a worklist with the operands of the predicated instruction. 4102 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4103 4104 // Holds instructions that we need to analyze again. An instruction may be 4105 // reanalyzed if we don't yet know if we can sink it or not. 4106 SmallVector<Instruction *, 8> InstsToReanalyze; 4107 4108 // Returns true if a given use occurs in the predicated block. Phi nodes use 4109 // their operands in their corresponding predecessor blocks. 4110 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4111 auto *I = cast<Instruction>(U.getUser()); 4112 BasicBlock *BB = I->getParent(); 4113 if (auto *Phi = dyn_cast<PHINode>(I)) 4114 BB = Phi->getIncomingBlock( 4115 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4116 return BB == PredBB; 4117 }; 4118 4119 // Iteratively sink the scalarized operands of the predicated instruction 4120 // into the block we created for it. When an instruction is sunk, it's 4121 // operands are then added to the worklist. The algorithm ends after one pass 4122 // through the worklist doesn't sink a single instruction. 
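  // As a purely illustrative example (names invented, not from any specific
  // test): suppose the predicated block holds a scalarized store whose
  // address was computed outside of it as
  //   %idx = add i64 %iv, 1
  //   %gep = getelementptr i32, i32* %base, i64 %idx
  // The first pass can sink %gep, since its only use is the store inside the
  // predicated block; once %gep has moved, %idx becomes sinkable as well
  // (later in the same pass or on the next one), and the iteration stops when
  // a full pass sinks nothing.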
4123 bool Changed; 4124 do { 4125 // Add the instructions that need to be reanalyzed to the worklist, and 4126 // reset the changed indicator. 4127 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4128 InstsToReanalyze.clear(); 4129 Changed = false; 4130 4131 while (!Worklist.empty()) { 4132 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4133 4134 // We can't sink an instruction if it is a phi node, is already in the 4135 // predicated block, is not in the loop, or may have side effects. 4136 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4137 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4138 continue; 4139 4140 // It's legal to sink the instruction if all its uses occur in the 4141 // predicated block. Otherwise, there's nothing to do yet, and we may 4142 // need to reanalyze the instruction. 4143 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4144 InstsToReanalyze.push_back(I); 4145 continue; 4146 } 4147 4148 // Move the instruction to the beginning of the predicated block, and add 4149 // it's operands to the worklist. 4150 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4151 Worklist.insert(I->op_begin(), I->op_end()); 4152 4153 // The sinking may have enabled other instructions to be sunk, so we will 4154 // need to iterate. 4155 Changed = true; 4156 } 4157 } while (Changed); 4158 } 4159 4160 void InnerLoopVectorizer::fixNonInductionPHIs() { 4161 for (PHINode *OrigPhi : OrigPHIsToFix) { 4162 PHINode *NewPhi = 4163 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 4164 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 4165 4166 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 4167 predecessors(OrigPhi->getParent())); 4168 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 4169 predecessors(NewPhi->getParent())); 4170 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 4171 "Scalar and Vector BB should have the same number of predecessors"); 4172 4173 // The insertion point in Builder may be invalidated by the time we get 4174 // here. Force the Builder insertion point to something valid so that we do 4175 // not run into issues during insertion point restore in 4176 // getOrCreateVectorValue calls below. 4177 Builder.SetInsertPoint(NewPhi); 4178 4179 // The predecessor order is preserved and we can rely on mapping between 4180 // scalar and vector block predecessors. 4181 for (unsigned i = 0; i < NumIncomingValues; ++i) { 4182 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 4183 4184 // When looking up the new scalar/vector values to fix up, use incoming 4185 // values from original phi. 4186 Value *ScIncV = 4187 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 4188 4189 // Scalar incoming value may need a broadcast 4190 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 4191 NewPhi->addIncoming(NewIncV, NewPredBB); 4192 } 4193 } 4194 } 4195 4196 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPUser &Operands, 4197 unsigned UF, unsigned VF, 4198 bool IsPtrLoopInvariant, 4199 SmallBitVector &IsIndexLoopInvariant, 4200 VPTransformState &State) { 4201 // Construct a vector GEP by widening the operands of the scalar GEP as 4202 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4203 // results in a vector of pointers when at least one operand of the GEP 4204 // is vector-typed. Thus, to keep the representation compact, we only use 4205 // vector-typed operands for loop-varying values. 
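  // As an illustrative sketch (types and value names invented): a scalar GEP
  // such as
  //   %gep = getelementptr inbounds float, float* %base, i64 %iv
  // with a loop-invariant %base and a loop-varying %iv is widened for VF = 4
  // by keeping %base scalar and using the widened induction as a vector
  // index:
  //   %vgep = getelementptr inbounds float, float* %base, <4 x i64> %vec.iv
  // which yields a <4 x float*>, one pointer per lane.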
4206 4207 if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4208 // If we are vectorizing, but the GEP has only loop-invariant operands, 4209 // the GEP we build (by only using vector-typed operands for 4210 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4211 // produce a vector of pointers, we need to either arbitrarily pick an 4212 // operand to broadcast, or broadcast a clone of the original GEP. 4213 // Here, we broadcast a clone of the original. 4214 // 4215 // TODO: If at some point we decide to scalarize instructions having 4216 // loop-invariant operands, this special case will no longer be 4217 // required. We would add the scalarization decision to 4218 // collectLoopScalars() and teach getVectorValue() to broadcast 4219 // the lane-zero scalar value. 4220 auto *Clone = Builder.Insert(GEP->clone()); 4221 for (unsigned Part = 0; Part < UF; ++Part) { 4222 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4223 VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart); 4224 addMetadata(EntryPart, GEP); 4225 } 4226 } else { 4227 // If the GEP has at least one loop-varying operand, we are sure to 4228 // produce a vector of pointers. But if we are only unrolling, we want 4229 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4230 // produce with the code below will be scalar (if VF == 1) or vector 4231 // (otherwise). Note that for the unroll-only case, we still maintain 4232 // values in the vector mapping with initVector, as we do for other 4233 // instructions. 4234 for (unsigned Part = 0; Part < UF; ++Part) { 4235 // The pointer operand of the new GEP. If it's loop-invariant, we 4236 // won't broadcast it. 4237 auto *Ptr = IsPtrLoopInvariant ? State.get(Operands.getOperand(0), {0, 0}) 4238 : State.get(Operands.getOperand(0), Part); 4239 4240 // Collect all the indices for the new GEP. If any index is 4241 // loop-invariant, we won't broadcast it. 4242 SmallVector<Value *, 4> Indices; 4243 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4244 VPValue *Operand = Operands.getOperand(I); 4245 if (IsIndexLoopInvariant[I - 1]) 4246 Indices.push_back(State.get(Operand, {0, 0})); 4247 else 4248 Indices.push_back(State.get(Operand, Part)); 4249 } 4250 4251 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4252 // but it should be a vector, otherwise. 4253 auto *NewGEP = 4254 GEP->isInBounds() 4255 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4256 Indices) 4257 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4258 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 4259 "NewGEP is not a pointer vector"); 4260 VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP); 4261 addMetadata(NewGEP, GEP); 4262 } 4263 } 4264 } 4265 4266 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 4267 unsigned VF) { 4268 PHINode *P = cast<PHINode>(PN); 4269 if (EnableVPlanNativePath) { 4270 // Currently we enter here in the VPlan-native path for non-induction 4271 // PHIs where all control flow is uniform. We simply widen these PHIs. 4272 // Create a vector phi with no operands - the vector phi operands will be 4273 // set at the end of vector code generation. 4274 Type *VecTy = 4275 (VF == 1) ? 
PN->getType() : FixedVectorType::get(PN->getType(), VF); 4276 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4277 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 4278 OrigPHIsToFix.push_back(P); 4279 4280 return; 4281 } 4282 4283 assert(PN->getParent() == OrigLoop->getHeader() && 4284 "Non-header phis should have been handled elsewhere"); 4285 4286 // In order to support recurrences we need to be able to vectorize Phi nodes. 4287 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4288 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4289 // this value when we vectorize all of the instructions that use the PHI. 4290 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 4291 for (unsigned Part = 0; Part < UF; ++Part) { 4292 // This is phase one of vectorizing PHIs. 4293 bool ScalarPHI = (VF == 1) || Cost->isInLoopReduction(cast<PHINode>(PN)); 4294 Type *VecTy = 4295 ScalarPHI ? PN->getType() : FixedVectorType::get(PN->getType(), VF); 4296 Value *EntryPart = PHINode::Create( 4297 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4298 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 4299 } 4300 return; 4301 } 4302 4303 setDebugLocFromInst(Builder, P); 4304 4305 // This PHINode must be an induction variable. 4306 // Make sure that we know about it. 4307 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4308 4309 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4310 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4311 4312 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4313 // which can be found from the original scalar operations. 4314 switch (II.getKind()) { 4315 case InductionDescriptor::IK_NoInduction: 4316 llvm_unreachable("Unknown induction"); 4317 case InductionDescriptor::IK_IntInduction: 4318 case InductionDescriptor::IK_FpInduction: 4319 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4320 case InductionDescriptor::IK_PtrInduction: { 4321 // Handle the pointer induction variable case. 4322 assert(P->getType()->isPointerTy() && "Unexpected type."); 4323 4324 if (Cost->isScalarAfterVectorization(P, VF)) { 4325 // This is the normalized GEP that starts counting at zero. 4326 Value *PtrInd = 4327 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4328 // Determine the number of scalars we need to generate for each unroll 4329 // iteration. If the instruction is uniform, we only need to generate the 4330 // first lane. Otherwise, we generate all VF values. 4331 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 
1 : VF; 4332 for (unsigned Part = 0; Part < UF; ++Part) { 4333 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4334 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 4335 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4336 Value *SclrGep = 4337 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4338 SclrGep->setName("next.gep"); 4339 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 4340 } 4341 } 4342 return; 4343 } 4344 assert(isa<SCEVConstant>(II.getStep()) && 4345 "Induction step not a SCEV constant!"); 4346 Type *PhiType = II.getStep()->getType(); 4347 4348 // Build a pointer phi 4349 Value *ScalarStartValue = II.getStartValue(); 4350 Type *ScStValueType = ScalarStartValue->getType(); 4351 PHINode *NewPointerPhi = 4352 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4353 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4354 4355 // A pointer induction, performed by using a gep 4356 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4357 Instruction *InductionLoc = LoopLatch->getTerminator(); 4358 const SCEV *ScalarStep = II.getStep(); 4359 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4360 Value *ScalarStepValue = 4361 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4362 Value *InductionGEP = GetElementPtrInst::Create( 4363 ScStValueType->getPointerElementType(), NewPointerPhi, 4364 Builder.CreateMul(ScalarStepValue, ConstantInt::get(PhiType, VF * UF)), 4365 "ptr.ind", InductionLoc); 4366 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4367 4368 // Create UF many actual address geps that use the pointer 4369 // phi as base and a vectorized version of the step value 4370 // (<step*0, ..., step*N>) as offset. 4371 for (unsigned Part = 0; Part < UF; ++Part) { 4372 SmallVector<Constant *, 8> Indices; 4373 // Create a vector of consecutive numbers from zero to VF. 4374 for (unsigned i = 0; i < VF; ++i) 4375 Indices.push_back(ConstantInt::get(PhiType, i + Part * VF)); 4376 Constant *StartOffset = ConstantVector::get(Indices); 4377 4378 Value *GEP = Builder.CreateGEP( 4379 ScStValueType->getPointerElementType(), NewPointerPhi, 4380 Builder.CreateMul(StartOffset, 4381 Builder.CreateVectorSplat(VF, ScalarStepValue), 4382 "vector.gep")); 4383 VectorLoopValueMap.setVectorValue(P, Part, GEP); 4384 } 4385 } 4386 } 4387 } 4388 4389 /// A helper function for checking whether an integer division-related 4390 /// instruction may divide by zero (in which case it must be predicated if 4391 /// executed conditionally in the scalar code). 4392 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4393 /// Non-zero divisors that are non compile-time constants will not be 4394 /// converted into multiplication, so we will still end up scalarizing 4395 /// the division, but can do so w/o predication. 
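/// For example, 'udiv i32 %x, 7' has a non-zero constant divisor and never
/// needs predication on this account, whereas 'udiv i32 %x, %y' (or a
/// constant divisor of zero) is conservatively treated as potentially
/// dividing by zero.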
4396 static bool mayDivideByZero(Instruction &I) { 4397 assert((I.getOpcode() == Instruction::UDiv || 4398 I.getOpcode() == Instruction::SDiv || 4399 I.getOpcode() == Instruction::URem || 4400 I.getOpcode() == Instruction::SRem) && 4401 "Unexpected instruction"); 4402 Value *Divisor = I.getOperand(1); 4403 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4404 return !CInt || CInt->isZero(); 4405 } 4406 4407 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User, 4408 VPTransformState &State) { 4409 switch (I.getOpcode()) { 4410 case Instruction::Call: 4411 case Instruction::Br: 4412 case Instruction::PHI: 4413 case Instruction::GetElementPtr: 4414 case Instruction::Select: 4415 llvm_unreachable("This instruction is handled by a different recipe."); 4416 case Instruction::UDiv: 4417 case Instruction::SDiv: 4418 case Instruction::SRem: 4419 case Instruction::URem: 4420 case Instruction::Add: 4421 case Instruction::FAdd: 4422 case Instruction::Sub: 4423 case Instruction::FSub: 4424 case Instruction::FNeg: 4425 case Instruction::Mul: 4426 case Instruction::FMul: 4427 case Instruction::FDiv: 4428 case Instruction::FRem: 4429 case Instruction::Shl: 4430 case Instruction::LShr: 4431 case Instruction::AShr: 4432 case Instruction::And: 4433 case Instruction::Or: 4434 case Instruction::Xor: { 4435 // Just widen unops and binops. 4436 setDebugLocFromInst(Builder, &I); 4437 4438 for (unsigned Part = 0; Part < UF; ++Part) { 4439 SmallVector<Value *, 2> Ops; 4440 for (VPValue *VPOp : User.operands()) 4441 Ops.push_back(State.get(VPOp, Part)); 4442 4443 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4444 4445 if (auto *VecOp = dyn_cast<Instruction>(V)) 4446 VecOp->copyIRFlags(&I); 4447 4448 // Use this vector value for all users of the original instruction. 4449 VectorLoopValueMap.setVectorValue(&I, Part, V); 4450 addMetadata(V, &I); 4451 } 4452 4453 break; 4454 } 4455 case Instruction::ICmp: 4456 case Instruction::FCmp: { 4457 // Widen compares. Generate vector compares. 4458 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4459 auto *Cmp = cast<CmpInst>(&I); 4460 setDebugLocFromInst(Builder, Cmp); 4461 for (unsigned Part = 0; Part < UF; ++Part) { 4462 Value *A = State.get(User.getOperand(0), Part); 4463 Value *B = State.get(User.getOperand(1), Part); 4464 Value *C = nullptr; 4465 if (FCmp) { 4466 // Propagate fast math flags. 4467 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4468 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4469 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4470 } else { 4471 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4472 } 4473 VectorLoopValueMap.setVectorValue(&I, Part, C); 4474 addMetadata(C, &I); 4475 } 4476 4477 break; 4478 } 4479 4480 case Instruction::ZExt: 4481 case Instruction::SExt: 4482 case Instruction::FPToUI: 4483 case Instruction::FPToSI: 4484 case Instruction::FPExt: 4485 case Instruction::PtrToInt: 4486 case Instruction::IntToPtr: 4487 case Instruction::SIToFP: 4488 case Instruction::UIToFP: 4489 case Instruction::Trunc: 4490 case Instruction::FPTrunc: 4491 case Instruction::BitCast: { 4492 auto *CI = cast<CastInst>(&I); 4493 setDebugLocFromInst(Builder, CI); 4494 4495 /// Vectorize casts. 4496 Type *DestTy = 4497 (VF == 1) ? 
CI->getType() : FixedVectorType::get(CI->getType(), VF); 4498 4499 for (unsigned Part = 0; Part < UF; ++Part) { 4500 Value *A = State.get(User.getOperand(0), Part); 4501 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4502 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4503 addMetadata(Cast, &I); 4504 } 4505 break; 4506 } 4507 default: 4508 // This instruction is not vectorized by simple widening. 4509 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4510 llvm_unreachable("Unhandled instruction!"); 4511 } // end of switch. 4512 } 4513 4514 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands, 4515 VPTransformState &State) { 4516 assert(!isa<DbgInfoIntrinsic>(I) && 4517 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4518 setDebugLocFromInst(Builder, &I); 4519 4520 Module *M = I.getParent()->getParent()->getParent(); 4521 auto *CI = cast<CallInst>(&I); 4522 4523 SmallVector<Type *, 4> Tys; 4524 for (Value *ArgOperand : CI->arg_operands()) 4525 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4526 4527 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4528 4529 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4530 // version of the instruction. 4531 // Is it beneficial to perform intrinsic call compared to lib call? 4532 bool NeedToScalarize = false; 4533 unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4534 bool UseVectorIntrinsic = 4535 ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost; 4536 assert((UseVectorIntrinsic || !NeedToScalarize) && 4537 "Instruction should be scalarized elsewhere."); 4538 4539 for (unsigned Part = 0; Part < UF; ++Part) { 4540 SmallVector<Value *, 4> Args; 4541 for (auto &I : enumerate(ArgOperands.operands())) { 4542 // Some intrinsics have a scalar argument - don't replace it with a 4543 // vector. 4544 Value *Arg; 4545 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4546 Arg = State.get(I.value(), Part); 4547 else 4548 Arg = State.get(I.value(), {0, 0}); 4549 Args.push_back(Arg); 4550 } 4551 4552 Function *VectorF; 4553 if (UseVectorIntrinsic) { 4554 // Use vector version of the intrinsic. 4555 Type *TysForDecl[] = {CI->getType()}; 4556 if (VF > 1) 4557 TysForDecl[0] = 4558 FixedVectorType::get(CI->getType()->getScalarType(), VF); 4559 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4560 assert(VectorF && "Can't retrieve vector intrinsic."); 4561 } else { 4562 // Use vector version of the function call. 4563 const VFShape Shape = VFShape::get(*CI, ElementCount::getFixed(VF), 4564 false /*HasGlobalPred*/); 4565 #ifndef NDEBUG 4566 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 4567 "Can't create vector function."); 4568 #endif 4569 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4570 } 4571 SmallVector<OperandBundleDef, 1> OpBundles; 4572 CI->getOperandBundlesAsDefs(OpBundles); 4573 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4574 4575 if (isa<FPMathOperator>(V)) 4576 V->copyFastMathFlags(CI); 4577 4578 VectorLoopValueMap.setVectorValue(&I, Part, V); 4579 addMetadata(V, &I); 4580 } 4581 } 4582 4583 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, 4584 VPUser &Operands, 4585 bool InvariantCond, 4586 VPTransformState &State) { 4587 setDebugLocFromInst(Builder, &I); 4588 4589 // The condition can be loop invariant but still defined inside the 4590 // loop. This means that we can't just use the original 'cond' value. 
4591 // We have to take the 'vectorized' value and pick the first lane. 4592 // Instcombine will make this a no-op. 4593 auto *InvarCond = 4594 InvariantCond ? State.get(Operands.getOperand(0), {0, 0}) : nullptr; 4595 4596 for (unsigned Part = 0; Part < UF; ++Part) { 4597 Value *Cond = 4598 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 4599 Value *Op0 = State.get(Operands.getOperand(1), Part); 4600 Value *Op1 = State.get(Operands.getOperand(2), Part); 4601 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 4602 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4603 addMetadata(Sel, &I); 4604 } 4605 } 4606 4607 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4608 // We should not collect Scalars more than once per VF. Right now, this 4609 // function is called from collectUniformsAndScalars(), which already does 4610 // this check. Collecting Scalars for VF=1 does not make any sense. 4611 assert(VF >= 2 && Scalars.find(VF) == Scalars.end() && 4612 "This function should not be visited twice for the same VF"); 4613 4614 SmallSetVector<Instruction *, 8> Worklist; 4615 4616 // These sets are used to seed the analysis with pointers used by memory 4617 // accesses that will remain scalar. 4618 SmallSetVector<Instruction *, 8> ScalarPtrs; 4619 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4620 auto *Latch = TheLoop->getLoopLatch(); 4621 4622 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4623 // The pointer operands of loads and stores will be scalar as long as the 4624 // memory access is not a gather or scatter operation. The value operand of a 4625 // store will remain scalar if the store is scalarized. 4626 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4627 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4628 assert(WideningDecision != CM_Unknown && 4629 "Widening decision should be ready at this moment"); 4630 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4631 if (Ptr == Store->getValueOperand()) 4632 return WideningDecision == CM_Scalarize; 4633 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4634 "Ptr is neither a value or pointer operand"); 4635 return WideningDecision != CM_GatherScatter; 4636 }; 4637 4638 // A helper that returns true if the given value is a bitcast or 4639 // getelementptr instruction contained in the loop. 4640 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4641 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4642 isa<GetElementPtrInst>(V)) && 4643 !TheLoop->isLoopInvariant(V); 4644 }; 4645 4646 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 4647 if (!isa<PHINode>(Ptr) || 4648 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 4649 return false; 4650 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 4651 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 4652 return false; 4653 return isScalarUse(MemAccess, Ptr); 4654 }; 4655 4656 // A helper that evaluates a memory access's use of a pointer. If the 4657 // pointer is actually the pointer induction of a loop, it is being 4658 // inserted into Worklist. If the use will be a scalar use, and the 4659 // pointer is only used by memory accesses, we place the pointer in 4660 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 
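  // For instance (purely illustrative), a getelementptr whose only users are
  // loads and stores that will not become gathers or scatters ends up in
  // ScalarPtrs, whereas the same getelementptr would be placed in
  // PossibleNonScalarPtrs if it also fed a non-memory user such as a PHI or a
  // ptrtoint cast.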
4661 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4662 if (isScalarPtrInduction(MemAccess, Ptr)) { 4663 Worklist.insert(cast<Instruction>(Ptr)); 4664 Instruction *Update = cast<Instruction>( 4665 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 4666 Worklist.insert(Update); 4667 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 4668 << "\n"); 4669 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Update 4670 << "\n"); 4671 return; 4672 } 4673 // We only care about bitcast and getelementptr instructions contained in 4674 // the loop. 4675 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4676 return; 4677 4678 // If the pointer has already been identified as scalar (e.g., if it was 4679 // also identified as uniform), there's nothing to do. 4680 auto *I = cast<Instruction>(Ptr); 4681 if (Worklist.count(I)) 4682 return; 4683 4684 // If the use of the pointer will be a scalar use, and all users of the 4685 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4686 // place the pointer in PossibleNonScalarPtrs. 4687 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4688 return isa<LoadInst>(U) || isa<StoreInst>(U); 4689 })) 4690 ScalarPtrs.insert(I); 4691 else 4692 PossibleNonScalarPtrs.insert(I); 4693 }; 4694 4695 // We seed the scalars analysis with three classes of instructions: (1) 4696 // instructions marked uniform-after-vectorization and (2) bitcast, 4697 // getelementptr and (pointer) phi instructions used by memory accesses 4698 // requiring a scalar use. 4699 // 4700 // (1) Add to the worklist all instructions that have been identified as 4701 // uniform-after-vectorization. 4702 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4703 4704 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4705 // memory accesses requiring a scalar use. The pointer operands of loads and 4706 // stores will be scalar as long as the memory accesses is not a gather or 4707 // scatter operation. The value operand of a store will remain scalar if the 4708 // store is scalarized. 4709 for (auto *BB : TheLoop->blocks()) 4710 for (auto &I : *BB) { 4711 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4712 evaluatePtrUse(Load, Load->getPointerOperand()); 4713 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4714 evaluatePtrUse(Store, Store->getPointerOperand()); 4715 evaluatePtrUse(Store, Store->getValueOperand()); 4716 } 4717 } 4718 for (auto *I : ScalarPtrs) 4719 if (!PossibleNonScalarPtrs.count(I)) { 4720 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4721 Worklist.insert(I); 4722 } 4723 4724 // Insert the forced scalars. 4725 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4726 // induction variable when the PHI user is scalarized. 4727 auto ForcedScalar = ForcedScalars.find(VF); 4728 if (ForcedScalar != ForcedScalars.end()) 4729 for (auto *I : ForcedScalar->second) 4730 Worklist.insert(I); 4731 4732 // Expand the worklist by looking through any bitcasts and getelementptr 4733 // instructions we've already identified as scalar. This is similar to the 4734 // expansion step in collectLoopUniforms(); however, here we're only 4735 // expanding to include additional bitcasts and getelementptr instructions. 
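  // For example (illustrative only): given the chain
  //   %gep.base = getelementptr i32, i32* %A, i64 %iv
  //   %gep      = getelementptr i32, i32* %gep.base, i64 1
  // where %gep has already been identified as scalar and %gep.base has no
  // other users inside the loop, the expansion below also marks %gep.base as
  // scalar.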
4736 unsigned Idx = 0; 4737 while (Idx != Worklist.size()) { 4738 Instruction *Dst = Worklist[Idx++]; 4739 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4740 continue; 4741 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4742 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4743 auto *J = cast<Instruction>(U); 4744 return !TheLoop->contains(J) || Worklist.count(J) || 4745 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4746 isScalarUse(J, Src)); 4747 })) { 4748 Worklist.insert(Src); 4749 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4750 } 4751 } 4752 4753 // An induction variable will remain scalar if all users of the induction 4754 // variable and induction variable update remain scalar. 4755 for (auto &Induction : Legal->getInductionVars()) { 4756 auto *Ind = Induction.first; 4757 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4758 4759 // If tail-folding is applied, the primary induction variable will be used 4760 // to feed a vector compare. 4761 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4762 continue; 4763 4764 // Determine if all users of the induction variable are scalar after 4765 // vectorization. 4766 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4767 auto *I = cast<Instruction>(U); 4768 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4769 }); 4770 if (!ScalarInd) 4771 continue; 4772 4773 // Determine if all users of the induction variable update instruction are 4774 // scalar after vectorization. 4775 auto ScalarIndUpdate = 4776 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4777 auto *I = cast<Instruction>(U); 4778 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4779 }); 4780 if (!ScalarIndUpdate) 4781 continue; 4782 4783 // The induction variable and its update instruction will remain scalar. 4784 Worklist.insert(Ind); 4785 Worklist.insert(IndUpdate); 4786 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4787 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4788 << "\n"); 4789 } 4790 4791 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4792 } 4793 4794 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4795 if (!blockNeedsPredication(I->getParent())) 4796 return false; 4797 switch(I->getOpcode()) { 4798 default: 4799 break; 4800 case Instruction::Load: 4801 case Instruction::Store: { 4802 if (!Legal->isMaskRequired(I)) 4803 return false; 4804 auto *Ptr = getLoadStorePointerOperand(I); 4805 auto *Ty = getMemInstValueType(I); 4806 // We have already decided how to vectorize this instruction, get that 4807 // result. 4808 if (VF > 1) { 4809 InstWidening WideningDecision = getWideningDecision(I, VF); 4810 assert(WideningDecision != CM_Unknown && 4811 "Widening decision should be ready at this moment"); 4812 return WideningDecision == CM_Scalarize; 4813 } 4814 const Align Alignment = getLoadStoreAlignment(I); 4815 return isa<LoadInst>(I) ? 
!(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4816 isLegalMaskedGather(Ty, Alignment)) 4817 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4818 isLegalMaskedScatter(Ty, Alignment)); 4819 } 4820 case Instruction::UDiv: 4821 case Instruction::SDiv: 4822 case Instruction::SRem: 4823 case Instruction::URem: 4824 return mayDivideByZero(*I); 4825 } 4826 return false; 4827 } 4828 4829 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4830 unsigned VF) { 4831 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4832 assert(getWideningDecision(I, VF) == CM_Unknown && 4833 "Decision should not be set yet."); 4834 auto *Group = getInterleavedAccessGroup(I); 4835 assert(Group && "Must have a group."); 4836 4837 // If the instruction's allocated size doesn't equal it's type size, it 4838 // requires padding and will be scalarized. 4839 auto &DL = I->getModule()->getDataLayout(); 4840 auto *ScalarTy = getMemInstValueType(I); 4841 if (hasIrregularType(ScalarTy, DL, VF)) 4842 return false; 4843 4844 // Check if masking is required. 4845 // A Group may need masking for one of two reasons: it resides in a block that 4846 // needs predication, or it was decided to use masking to deal with gaps. 4847 bool PredicatedAccessRequiresMasking = 4848 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4849 bool AccessWithGapsRequiresMasking = 4850 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 4851 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4852 return true; 4853 4854 // If masked interleaving is required, we expect that the user/target had 4855 // enabled it, because otherwise it either wouldn't have been created or 4856 // it should have been invalidated by the CostModel. 4857 assert(useMaskedInterleavedAccesses(TTI) && 4858 "Masked interleave-groups for predicated accesses are not enabled."); 4859 4860 auto *Ty = getMemInstValueType(I); 4861 const Align Alignment = getLoadStoreAlignment(I); 4862 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4863 : TTI.isLegalMaskedStore(Ty, Alignment); 4864 } 4865 4866 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4867 unsigned VF) { 4868 // Get and ensure we have a valid memory instruction. 4869 LoadInst *LI = dyn_cast<LoadInst>(I); 4870 StoreInst *SI = dyn_cast<StoreInst>(I); 4871 assert((LI || SI) && "Invalid memory instruction"); 4872 4873 auto *Ptr = getLoadStorePointerOperand(I); 4874 4875 // In order to be widened, the pointer should be consecutive, first of all. 4876 if (!Legal->isConsecutivePtr(Ptr)) 4877 return false; 4878 4879 // If the instruction is a store located in a predicated block, it will be 4880 // scalarized. 4881 if (isScalarWithPredication(I)) 4882 return false; 4883 4884 // If the instruction's allocated size doesn't equal it's type size, it 4885 // requires padding and will be scalarized. 4886 auto &DL = I->getModule()->getDataLayout(); 4887 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4888 if (hasIrregularType(ScalarTy, DL, VF)) 4889 return false; 4890 4891 return true; 4892 } 4893 4894 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4895 // We should not collect Uniforms more than once per VF. Right now, 4896 // this function is called from collectUniformsAndScalars(), which 4897 // already does this check. Collecting Uniforms for VF=1 does not make any 4898 // sense. 
4899 4900 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4901 "This function should not be visited twice for the same VF"); 4902 4903 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4904 // not analyze again. Uniforms.count(VF) will return 1. 4905 Uniforms[VF].clear(); 4906 4907 // We now know that the loop is vectorizable! 4908 // Collect instructions inside the loop that will remain uniform after 4909 // vectorization. 4910 4911 // Global values, params and instructions outside of current loop are out of 4912 // scope. 4913 auto isOutOfScope = [&](Value *V) -> bool { 4914 Instruction *I = dyn_cast<Instruction>(V); 4915 return (!I || !TheLoop->contains(I)); 4916 }; 4917 4918 SetVector<Instruction *> Worklist; 4919 BasicBlock *Latch = TheLoop->getLoopLatch(); 4920 4921 // Instructions that are scalar with predication must not be considered 4922 // uniform after vectorization, because that would create an erroneous 4923 // replicating region where only a single instance out of VF should be formed. 4924 // TODO: optimize such seldom cases if found important, see PR40816. 4925 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4926 if (isScalarWithPredication(I, VF)) { 4927 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4928 << *I << "\n"); 4929 return; 4930 } 4931 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4932 Worklist.insert(I); 4933 }; 4934 4935 // Start with the conditional branch. If the branch condition is an 4936 // instruction contained in the loop that is only used by the branch, it is 4937 // uniform. 4938 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4939 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4940 addToWorklistIfAllowed(Cmp); 4941 4942 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4943 // are pointers that are treated like consecutive pointers during 4944 // vectorization. The pointer operands of interleaved accesses are an 4945 // example. 4946 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4947 4948 // Holds pointer operands of instructions that are possibly non-uniform. 4949 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4950 4951 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4952 InstWidening WideningDecision = getWideningDecision(I, VF); 4953 assert(WideningDecision != CM_Unknown && 4954 "Widening decision should be ready at this moment"); 4955 4956 return (WideningDecision == CM_Widen || 4957 WideningDecision == CM_Widen_Reverse || 4958 WideningDecision == CM_Interleave); 4959 }; 4960 // Iterate over the instructions in the loop, and collect all 4961 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4962 // that a consecutive-like pointer operand will be scalarized, we collect it 4963 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4964 // getelementptr instruction can be used by both vectorized and scalarized 4965 // memory instructions. For example, if a loop loads and stores from the same 4966 // location, but the store is conditional, the store will be scalarized, and 4967 // the getelementptr won't remain uniform. 4968 for (auto *BB : TheLoop->blocks()) 4969 for (auto &I : *BB) { 4970 // If there's no pointer operand, there's nothing to do. 
4971 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4972 if (!Ptr) 4973 continue; 4974 4975 // True if all users of Ptr are memory accesses that have Ptr as their 4976 // pointer operand. 4977 auto UsersAreMemAccesses = 4978 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4979 return getLoadStorePointerOperand(U) == Ptr; 4980 }); 4981 4982 // Ensure the memory instruction will not be scalarized or used by 4983 // gather/scatter, making its pointer operand non-uniform. If the pointer 4984 // operand is used by any instruction other than a memory access, we 4985 // conservatively assume the pointer operand may be non-uniform. 4986 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4987 PossibleNonUniformPtrs.insert(Ptr); 4988 4989 // If the memory instruction will be vectorized and its pointer operand 4990 // is consecutive-like, or interleaving - the pointer operand should 4991 // remain uniform. 4992 else 4993 ConsecutiveLikePtrs.insert(Ptr); 4994 } 4995 4996 // Add to the Worklist all consecutive and consecutive-like pointers that 4997 // aren't also identified as possibly non-uniform. 4998 for (auto *V : ConsecutiveLikePtrs) 4999 if (!PossibleNonUniformPtrs.count(V)) 5000 addToWorklistIfAllowed(V); 5001 5002 // Expand Worklist in topological order: whenever a new instruction 5003 // is added , its users should be already inside Worklist. It ensures 5004 // a uniform instruction will only be used by uniform instructions. 5005 unsigned idx = 0; 5006 while (idx != Worklist.size()) { 5007 Instruction *I = Worklist[idx++]; 5008 5009 for (auto OV : I->operand_values()) { 5010 // isOutOfScope operands cannot be uniform instructions. 5011 if (isOutOfScope(OV)) 5012 continue; 5013 // First order recurrence Phi's should typically be considered 5014 // non-uniform. 5015 auto *OP = dyn_cast<PHINode>(OV); 5016 if (OP && Legal->isFirstOrderRecurrence(OP)) 5017 continue; 5018 // If all the users of the operand are uniform, then add the 5019 // operand into the uniform worklist. 5020 auto *OI = cast<Instruction>(OV); 5021 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5022 auto *J = cast<Instruction>(U); 5023 return Worklist.count(J) || 5024 (OI == getLoadStorePointerOperand(J) && 5025 isUniformDecision(J, VF)); 5026 })) 5027 addToWorklistIfAllowed(OI); 5028 } 5029 } 5030 5031 // Returns true if Ptr is the pointer operand of a memory access instruction 5032 // I, and I is known to not require scalarization. 5033 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5034 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5035 }; 5036 5037 // For an instruction to be added into Worklist above, all its users inside 5038 // the loop should also be in Worklist. However, this condition cannot be 5039 // true for phi nodes that form a cyclic dependence. We must process phi 5040 // nodes separately. An induction variable will remain uniform if all users 5041 // of the induction variable and induction variable update remain uniform. 5042 // The code below handles both pointer and non-pointer induction variables. 5043 for (auto &Induction : Legal->getInductionVars()) { 5044 auto *Ind = Induction.first; 5045 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5046 5047 // Determine if all users of the induction variable are uniform after 5048 // vectorization. 
5049 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5050 auto *I = cast<Instruction>(U); 5051 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5052 isVectorizedMemAccessUse(I, Ind); 5053 }); 5054 if (!UniformInd) 5055 continue; 5056 5057 // Determine if all users of the induction variable update instruction are 5058 // uniform after vectorization. 5059 auto UniformIndUpdate = 5060 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5061 auto *I = cast<Instruction>(U); 5062 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5063 isVectorizedMemAccessUse(I, IndUpdate); 5064 }); 5065 if (!UniformIndUpdate) 5066 continue; 5067 5068 // The induction variable and its update instruction will remain uniform. 5069 addToWorklistIfAllowed(Ind); 5070 addToWorklistIfAllowed(IndUpdate); 5071 } 5072 5073 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5074 } 5075 5076 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5077 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5078 5079 if (Legal->getRuntimePointerChecking()->Need) { 5080 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5081 "runtime pointer checks needed. Enable vectorization of this " 5082 "loop with '#pragma clang loop vectorize(enable)' when " 5083 "compiling with -Os/-Oz", 5084 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5085 return true; 5086 } 5087 5088 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5089 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5090 "runtime SCEV checks needed. Enable vectorization of this " 5091 "loop with '#pragma clang loop vectorize(enable)' when " 5092 "compiling with -Os/-Oz", 5093 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5094 return true; 5095 } 5096 5097 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5098 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5099 reportVectorizationFailure("Runtime stride check for small trip count", 5100 "runtime stride == 1 checks needed. Enable vectorization of " 5101 "this loop without such check by compiling with -Os/-Oz", 5102 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5103 return true; 5104 } 5105 5106 return false; 5107 } 5108 5109 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(unsigned UserVF, 5110 unsigned UserIC) { 5111 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5112 // TODO: It may by useful to do since it's still likely to be dynamically 5113 // uniform if the target can skip. 5114 reportVectorizationFailure( 5115 "Not inserting runtime ptr check for divergent target", 5116 "runtime pointer checks needed. Not enabled for divergent target", 5117 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 5118 return None; 5119 } 5120 5121 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 5122 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 5123 if (TC == 1) { 5124 reportVectorizationFailure("Single iteration (non) loop", 5125 "loop trip count is one, irrelevant for vectorization", 5126 "SingleIterationLoop", ORE, TheLoop); 5127 return None; 5128 } 5129 5130 switch (ScalarEpilogueStatus) { 5131 case CM_ScalarEpilogueAllowed: 5132 return UserVF ? 
UserVF : computeFeasibleMaxVF(TC); 5133 case CM_ScalarEpilogueNotNeededUsePredicate: 5134 LLVM_DEBUG( 5135 dbgs() << "LV: vector predicate hint/switch found.\n" 5136 << "LV: Not allowing scalar epilogue, creating predicated " 5137 << "vector loop.\n"); 5138 break; 5139 case CM_ScalarEpilogueNotAllowedLowTripLoop: 5140 // fallthrough as a special case of OptForSize 5141 case CM_ScalarEpilogueNotAllowedOptSize: 5142 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 5143 LLVM_DEBUG( 5144 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 5145 else 5146 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 5147 << "count.\n"); 5148 5149 // Bail if runtime checks are required, which are not good when optimising 5150 // for size. 5151 if (runtimeChecksRequired()) 5152 return None; 5153 break; 5154 } 5155 5156 // Now try the tail folding 5157 5158 // Invalidate interleave groups that require an epilogue if we can't mask 5159 // the interleave-group. 5160 if (!useMaskedInterleavedAccesses(TTI)) { 5161 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 5162 "No decisions should have been taken at this point"); 5163 // Note: There is no need to invalidate any cost modeling decisions here, as 5164 // non where taken so far. 5165 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 5166 } 5167 5168 unsigned MaxVF = UserVF ? UserVF : computeFeasibleMaxVF(TC); 5169 assert((UserVF || isPowerOf2_32(MaxVF)) && "MaxVF must be a power of 2"); 5170 unsigned MaxVFtimesIC = UserIC ? MaxVF * UserIC : MaxVF; 5171 if (TC > 0 && TC % MaxVFtimesIC == 0) { 5172 // Accept MaxVF if we do not have a tail. 5173 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5174 return MaxVF; 5175 } 5176 5177 // If we don't know the precise trip count, or if the trip count that we 5178 // found modulo the vectorization factor is not zero, try to fold the tail 5179 // by masking. 5180 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5181 if (Legal->prepareToFoldTailByMasking()) { 5182 FoldTailByMasking = true; 5183 return MaxVF; 5184 } 5185 5186 if (TC == 0) { 5187 reportVectorizationFailure( 5188 "Unable to calculate the loop count due to complex control flow", 5189 "unable to calculate the loop count due to complex control flow", 5190 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5191 return None; 5192 } 5193 5194 reportVectorizationFailure( 5195 "Cannot optimize for size and vectorize at the same time.", 5196 "cannot optimize for size and vectorize at the same time. " 5197 "Enable vectorization of this loop with '#pragma clang loop " 5198 "vectorize(enable)' when compiling with -Os/-Oz", 5199 "NoTailLoopWithOptForSize", ORE, TheLoop); 5200 return None; 5201 } 5202 5203 unsigned 5204 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) { 5205 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5206 unsigned SmallestType, WidestType; 5207 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5208 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5209 5210 // Get the maximum safe dependence distance in bits computed by LAA. 5211 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5212 // the memory accesses that is most restrictive (involved in the smallest 5213 // dependence distance). 
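  // Illustrative arithmetic (numbers invented): if LAA proves a maximal safe
  // dependence distance of 4 iterations between i32 accesses, the width
  // returned here is 4 * 32 = 128 bits, so a 256-bit target register is
  // clamped to 128 bits below and MaxVectorSize is capped at 128 / WidestType
  // lanes.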
5214 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 5215 5216 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 5217 5218 // Ensure MaxVF is a power of 2; the dependence distance bound may not be. 5219 // Note that both WidestRegister and WidestType may not be a powers of 2. 5220 unsigned MaxVectorSize = PowerOf2Floor(WidestRegister / WidestType); 5221 5222 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5223 << " / " << WidestType << " bits.\n"); 5224 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5225 << WidestRegister << " bits.\n"); 5226 5227 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 5228 " into one vector!"); 5229 if (MaxVectorSize == 0) { 5230 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5231 MaxVectorSize = 1; 5232 return MaxVectorSize; 5233 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 5234 isPowerOf2_32(ConstTripCount)) { 5235 // We need to clamp the VF to be the ConstTripCount. There is no point in 5236 // choosing a higher viable VF as done in the loop below. 5237 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5238 << ConstTripCount << "\n"); 5239 MaxVectorSize = ConstTripCount; 5240 return MaxVectorSize; 5241 } 5242 5243 unsigned MaxVF = MaxVectorSize; 5244 if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) || 5245 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5246 // Collect all viable vectorization factors larger than the default MaxVF 5247 // (i.e. MaxVectorSize). 5248 SmallVector<unsigned, 8> VFs; 5249 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5250 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 5251 VFs.push_back(VS); 5252 5253 // For each VF calculate its register usage. 5254 auto RUs = calculateRegisterUsage(VFs); 5255 5256 // Select the largest VF which doesn't require more registers than existing 5257 // ones. 5258 for (int i = RUs.size() - 1; i >= 0; --i) { 5259 bool Selected = true; 5260 for (auto& pair : RUs[i].MaxLocalUsers) { 5261 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5262 if (pair.second > TargetNumRegisters) 5263 Selected = false; 5264 } 5265 if (Selected) { 5266 MaxVF = VFs[i]; 5267 break; 5268 } 5269 } 5270 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 5271 if (MaxVF < MinVF) { 5272 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5273 << ") with target's minimum: " << MinVF << '\n'); 5274 MaxVF = MinVF; 5275 } 5276 } 5277 } 5278 return MaxVF; 5279 } 5280 5281 VectorizationFactor 5282 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 5283 float Cost = expectedCost(1).first; 5284 const float ScalarCost = Cost; 5285 unsigned Width = 1; 5286 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5287 5288 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5289 if (ForceVectorization && MaxVF > 1) { 5290 // Ignore scalar width, because the user explicitly wants vectorization. 5291 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5292 // evaluation. 5293 Cost = std::numeric_limits<float>::max(); 5294 } 5295 5296 for (unsigned i = 2; i <= MaxVF; i *= 2) { 5297 // Notice that the vector loop needs to be executed less times, so 5298 // we need to divide the cost of the vector loops by the width of 5299 // the vector elements. 
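    // Purely illustrative numbers: if the scalar loop costs 8 per iteration
    // and the VF = 4 body costs 20, the per-lane cost is 20 / 4 = 5, which
    // beats 8, so VF = 4 is preferred so far; a VF = 8 body costing 48 gives
    // a per-lane cost of 6 and would not displace it.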
5300 VectorizationCostTy C = expectedCost(i); 5301 float VectorCost = C.first / (float)i; 5302 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5303 << " costs: " << (int)VectorCost << ".\n"); 5304 if (!C.second && !ForceVectorization) { 5305 LLVM_DEBUG( 5306 dbgs() << "LV: Not considering vector loop of width " << i 5307 << " because it will not generate any vector instructions.\n"); 5308 continue; 5309 } 5310 if (VectorCost < Cost) { 5311 Cost = VectorCost; 5312 Width = i; 5313 } 5314 } 5315 5316 if (!EnableCondStoresVectorization && NumPredStores) { 5317 reportVectorizationFailure("There are conditional stores.", 5318 "store that is conditionally executed prevents vectorization", 5319 "ConditionalStore", ORE, TheLoop); 5320 Width = 1; 5321 Cost = ScalarCost; 5322 } 5323 5324 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5325 << "LV: Vectorization seems to be not beneficial, " 5326 << "but was forced by a user.\n"); 5327 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5328 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 5329 return Factor; 5330 } 5331 5332 std::pair<unsigned, unsigned> 5333 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5334 unsigned MinWidth = -1U; 5335 unsigned MaxWidth = 8; 5336 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5337 5338 // For each block. 5339 for (BasicBlock *BB : TheLoop->blocks()) { 5340 // For each instruction in the loop. 5341 for (Instruction &I : BB->instructionsWithoutDebug()) { 5342 Type *T = I.getType(); 5343 5344 // Skip ignored values. 5345 if (ValuesToIgnore.count(&I)) 5346 continue; 5347 5348 // Only examine Loads, Stores and PHINodes. 5349 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5350 continue; 5351 5352 // Examine PHI nodes that are reduction variables. Update the type to 5353 // account for the recurrence type. 5354 if (auto *PN = dyn_cast<PHINode>(&I)) { 5355 if (!Legal->isReductionVariable(PN)) 5356 continue; 5357 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN]; 5358 T = RdxDesc.getRecurrenceType(); 5359 } 5360 5361 // Examine the stored values. 5362 if (auto *ST = dyn_cast<StoreInst>(&I)) 5363 T = ST->getValueOperand()->getType(); 5364 5365 // Ignore loaded pointer types and stored pointer types that are not 5366 // vectorizable. 5367 // 5368 // FIXME: The check here attempts to predict whether a load or store will 5369 // be vectorized. We only know this for certain after a VF has 5370 // been selected. Here, we assume that if an access can be 5371 // vectorized, it will be. We should also look at extending this 5372 // optimization to non-pointer types. 5373 // 5374 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 5375 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 5376 continue; 5377 5378 MinWidth = std::min(MinWidth, 5379 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5380 MaxWidth = std::max(MaxWidth, 5381 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5382 } 5383 } 5384 5385 return {MinWidth, MaxWidth}; 5386 } 5387 5388 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF, 5389 unsigned LoopCost) { 5390 // -- The interleave heuristics -- 5391 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5392 // There are many micro-architectural considerations that we can't predict 5393 // at this level. 
For example, frontend pressure (on decode or fetch) due to 5394 // code size, or the number and capabilities of the execution ports. 5395 // 5396 // We use the following heuristics to select the interleave count: 5397 // 1. If the code has reductions, then we interleave to break the cross 5398 // iteration dependency. 5399 // 2. If the loop is really small, then we interleave to reduce the loop 5400 // overhead. 5401 // 3. We don't interleave if we think that we will spill registers to memory 5402 // due to the increased register pressure. 5403 5404 if (!isScalarEpilogueAllowed()) 5405 return 1; 5406 5407 // We used the distance for the interleave count. 5408 if (Legal->getMaxSafeDepDistBytes() != -1U) 5409 return 1; 5410 5411 // Do not interleave loops with a relatively small known or estimated trip 5412 // count. 5413 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5414 if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold) 5415 return 1; 5416 5417 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5418 // We divide by these constants so assume that we have at least one 5419 // instruction that uses at least one register. 5420 for (auto& pair : R.MaxLocalUsers) { 5421 pair.second = std::max(pair.second, 1U); 5422 } 5423 5424 // We calculate the interleave count using the following formula. 5425 // Subtract the number of loop invariants from the number of available 5426 // registers. These registers are used by all of the interleaved instances. 5427 // Next, divide the remaining registers by the number of registers that is 5428 // required by the loop, in order to estimate how many parallel instances 5429 // fit without causing spills. All of this is rounded down if necessary to be 5430 // a power of two. We want power of two interleave count to simplify any 5431 // addressing operations or alignment considerations. 5432 // We also want power of two interleave counts to ensure that the induction 5433 // variable of the vector loop wraps to zero, when tail is folded by masking; 5434 // this currently happens when OptForSize, in which case IC is set to 1 above. 5435 unsigned IC = UINT_MAX; 5436 5437 for (auto& pair : R.MaxLocalUsers) { 5438 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5439 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5440 << " registers of " 5441 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5442 if (VF == 1) { 5443 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5444 TargetNumRegisters = ForceTargetNumScalarRegs; 5445 } else { 5446 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5447 TargetNumRegisters = ForceTargetNumVectorRegs; 5448 } 5449 unsigned MaxLocalUsers = pair.second; 5450 unsigned LoopInvariantRegs = 0; 5451 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5452 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5453 5454 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5455 // Don't count the induction variable as interleaved. 5456 if (EnableIndVarRegisterHeur) { 5457 TmpIC = 5458 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5459 std::max(1U, (MaxLocalUsers - 1))); 5460 } 5461 5462 IC = std::min(IC, TmpIC); 5463 } 5464 5465 // Clamp the interleave ranges to reasonable counts. 5466 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5467 5468 // Check if the user has overridden the max. 
5469 if (VF == 1) { 5470 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5471 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5472 } else { 5473 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5474 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5475 } 5476 5477 // If trip count is known or estimated compile time constant, limit the 5478 // interleave count to be less than the trip count divided by VF. 5479 if (BestKnownTC) { 5480 MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount); 5481 } 5482 5483 // If we did not calculate the cost for VF (because the user selected the VF) 5484 // then we calculate the cost of VF here. 5485 if (LoopCost == 0) 5486 LoopCost = expectedCost(VF).first; 5487 5488 assert(LoopCost && "Non-zero loop cost expected"); 5489 5490 // Clamp the calculated IC to be between the 1 and the max interleave count 5491 // that the target and trip count allows. 5492 if (IC > MaxInterleaveCount) 5493 IC = MaxInterleaveCount; 5494 else if (IC < 1) 5495 IC = 1; 5496 5497 // Interleave if we vectorized this loop and there is a reduction that could 5498 // benefit from interleaving. 5499 if (VF > 1 && !Legal->getReductionVars().empty()) { 5500 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5501 return IC; 5502 } 5503 5504 // Note that if we've already vectorized the loop we will have done the 5505 // runtime check and so interleaving won't require further checks. 5506 bool InterleavingRequiresRuntimePointerCheck = 5507 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5508 5509 // We want to interleave small loops in order to reduce the loop overhead and 5510 // potentially expose ILP opportunities. 5511 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5512 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5513 // We assume that the cost overhead is 1 and we use the cost model 5514 // to estimate the cost of the loop and interleave until the cost of the 5515 // loop overhead is about 5% of the cost of the loop. 5516 unsigned SmallIC = 5517 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5518 5519 // Interleave until store/load ports (estimated by max interleave count) are 5520 // saturated. 5521 unsigned NumStores = Legal->getNumStores(); 5522 unsigned NumLoads = Legal->getNumLoads(); 5523 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5524 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5525 5526 // If we have a scalar reduction (vector reductions are already dealt with 5527 // by this point), we can increase the critical path length if the loop 5528 // we're interleaving is inside another loop. Limit, by default to 2, so the 5529 // critical path only gets increased by one reduction operation. 
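    // Illustrative example with hypothetical numbers: if SmallIC was computed
    // as 8 above but the loop is a scalar reduction nested inside an outer
    // loop, the clamp below caps SmallIC (and StoresIC/LoadsIC) at
    // MaxNestedScalarReductionIC, 2 by default, so interleaving lengthens the
    // reduction's critical path by at most one extra operation.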
5530 if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) { 5531 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5532 SmallIC = std::min(SmallIC, F); 5533 StoresIC = std::min(StoresIC, F); 5534 LoadsIC = std::min(LoadsIC, F); 5535 } 5536 5537 if (EnableLoadStoreRuntimeInterleave && 5538 std::max(StoresIC, LoadsIC) > SmallIC) { 5539 LLVM_DEBUG( 5540 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5541 return std::max(StoresIC, LoadsIC); 5542 } 5543 5544 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5545 return SmallIC; 5546 } 5547 5548 // Interleave if this is a large loop (small loops are already dealt with by 5549 // this point) that could benefit from interleaving. 5550 bool HasReductions = !Legal->getReductionVars().empty(); 5551 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5552 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5553 return IC; 5554 } 5555 5556 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5557 return 1; 5558 } 5559 5560 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5561 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5562 // This function calculates the register usage by measuring the highest number 5563 // of values that are alive at a single location. Obviously, this is a very 5564 // rough estimation. We scan the loop in a topological order in order and 5565 // assign a number to each instruction. We use RPO to ensure that defs are 5566 // met before their users. We assume that each instruction that has in-loop 5567 // users starts an interval. We record every time that an in-loop value is 5568 // used, so we have a list of the first and last occurrences of each 5569 // instruction. Next, we transpose this data structure into a multi map that 5570 // holds the list of intervals that *end* at a specific location. This multi 5571 // map allows us to perform a linear search. We scan the instructions linearly 5572 // and record each time that a new interval starts, by placing it in a set. 5573 // If we find this value in the multi-map then we remove it from the set. 5574 // The max register usage is the maximum size of the set. 5575 // We also search for instructions that are defined outside the loop, but are 5576 // used inside the loop. We need this number separately from the max-interval 5577 // usage number because when we unroll, loop-invariant values do not take 5578 // more register. 5579 LoopBlocksDFS DFS(TheLoop); 5580 DFS.perform(LI); 5581 5582 RegisterUsage RU; 5583 5584 // Each 'key' in the map opens a new interval. The values 5585 // of the map are the index of the 'last seen' usage of the 5586 // instruction that is the key. 5587 using IntervalMap = DenseMap<Instruction *, unsigned>; 5588 5589 // Maps instruction to its index. 5590 SmallVector<Instruction *, 64> IdxToInstr; 5591 // Marks the end of each interval. 5592 IntervalMap EndPoint; 5593 // Saves the list of instruction indices that are used in the loop. 5594 SmallPtrSet<Instruction *, 8> Ends; 5595 // Saves the list of values that are used in the loop but are 5596 // defined outside the loop, such as arguments and constants. 5597 SmallPtrSet<Value *, 8> LoopInvariants; 5598 5599 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5600 for (Instruction &I : BB->instructionsWithoutDebug()) { 5601 IdxToInstr.push_back(&I); 5602 5603 // Save the end location of each USE. 
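      // Illustrative example with hypothetical indices: if %a is pushed at
      // index 0 and its last in-loop user is the instruction pushed at index
      // 3, EndPoint[%a] ends up as 4; the linear scan further down therefore
      // keeps %a in OpenIntervals while visiting indices 1..3 and erases it
      // before processing index 4.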
5604 for (Value *U : I.operands()) { 5605 auto *Instr = dyn_cast<Instruction>(U); 5606 5607 // Ignore non-instruction values such as arguments, constants, etc. 5608 if (!Instr) 5609 continue; 5610 5611 // If this instruction is outside the loop then record it and continue. 5612 if (!TheLoop->contains(Instr)) { 5613 LoopInvariants.insert(Instr); 5614 continue; 5615 } 5616 5617 // Overwrite previous end points. 5618 EndPoint[Instr] = IdxToInstr.size(); 5619 Ends.insert(Instr); 5620 } 5621 } 5622 } 5623 5624 // Saves the list of intervals that end with the index in 'key'. 5625 using InstrList = SmallVector<Instruction *, 2>; 5626 DenseMap<unsigned, InstrList> TransposeEnds; 5627 5628 // Transpose the EndPoints to a list of values that end at each index. 5629 for (auto &Interval : EndPoint) 5630 TransposeEnds[Interval.second].push_back(Interval.first); 5631 5632 SmallPtrSet<Instruction *, 8> OpenIntervals; 5633 5634 // Get the size of the widest register. 5635 unsigned MaxSafeDepDist = -1U; 5636 if (Legal->getMaxSafeDepDistBytes() != -1U) 5637 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5638 unsigned WidestRegister = 5639 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5640 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5641 5642 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5643 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5644 5645 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5646 5647 // A lambda that gets the register usage for the given type and VF. 5648 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5649 if (Ty->isTokenTy()) 5650 return 0U; 5651 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5652 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5653 }; 5654 5655 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5656 Instruction *I = IdxToInstr[i]; 5657 5658 // Remove all of the instructions that end at this location. 5659 InstrList &List = TransposeEnds[i]; 5660 for (Instruction *ToRemove : List) 5661 OpenIntervals.erase(ToRemove); 5662 5663 // Ignore instructions that are never used within the loop. 5664 if (!Ends.count(I)) 5665 continue; 5666 5667 // Skip ignored values. 5668 if (ValuesToIgnore.count(I)) 5669 continue; 5670 5671 // For each VF find the maximum usage of registers. 5672 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5673 // Count the number of live intervals. 5674 SmallMapVector<unsigned, unsigned, 4> RegUsage; 5675 5676 if (VFs[j] == 1) { 5677 for (auto Inst : OpenIntervals) { 5678 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5679 if (RegUsage.find(ClassID) == RegUsage.end()) 5680 RegUsage[ClassID] = 1; 5681 else 5682 RegUsage[ClassID] += 1; 5683 } 5684 } else { 5685 collectUniformsAndScalars(VFs[j]); 5686 for (auto Inst : OpenIntervals) { 5687 // Skip ignored values for VF > 1. 
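        // (Values that are not skipped are charged via GetRegUsage below;
        // e.g., assuming a 128-bit widest register, an i32 value at VF = 8
        // is charged max(1, 8 * 32 / 128) = 2 registers of its class, while
        // at VF = 4 it is charged 1.)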
5688 if (VecValuesToIgnore.count(Inst)) 5689 continue; 5690 if (isScalarAfterVectorization(Inst, VFs[j])) { 5691 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5692 if (RegUsage.find(ClassID) == RegUsage.end()) 5693 RegUsage[ClassID] = 1; 5694 else 5695 RegUsage[ClassID] += 1; 5696 } else { 5697 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 5698 if (RegUsage.find(ClassID) == RegUsage.end()) 5699 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 5700 else 5701 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 5702 } 5703 } 5704 } 5705 5706 for (auto& pair : RegUsage) { 5707 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 5708 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 5709 else 5710 MaxUsages[j][pair.first] = pair.second; 5711 } 5712 } 5713 5714 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5715 << OpenIntervals.size() << '\n'); 5716 5717 // Add the current instruction to the list of open intervals. 5718 OpenIntervals.insert(I); 5719 } 5720 5721 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5722 SmallMapVector<unsigned, unsigned, 4> Invariant; 5723 5724 for (auto Inst : LoopInvariants) { 5725 unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 5726 unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType()); 5727 if (Invariant.find(ClassID) == Invariant.end()) 5728 Invariant[ClassID] = Usage; 5729 else 5730 Invariant[ClassID] += Usage; 5731 } 5732 5733 LLVM_DEBUG({ 5734 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 5735 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 5736 << " item\n"; 5737 for (const auto &pair : MaxUsages[i]) { 5738 dbgs() << "LV(REG): RegisterClass: " 5739 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5740 << " registers\n"; 5741 } 5742 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 5743 << " item\n"; 5744 for (const auto &pair : Invariant) { 5745 dbgs() << "LV(REG): RegisterClass: " 5746 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5747 << " registers\n"; 5748 } 5749 }); 5750 5751 RU.LoopInvariantRegs = Invariant; 5752 RU.MaxLocalUsers = MaxUsages[i]; 5753 RUs[i] = RU; 5754 } 5755 5756 return RUs; 5757 } 5758 5759 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5760 // TODO: Cost model for emulated masked load/store is completely 5761 // broken. This hack guides the cost model to use an artificially 5762 // high enough value to practically disable vectorization with such 5763 // operations, except where previously deployed legality hack allowed 5764 // using very low cost values. This is to avoid regressions coming simply 5765 // from moving "masked load/store" check from legality to cost model. 5766 // Masked Load/Gather emulation was previously never allowed. 5767 // Limited number of Masked Store/Scatter emulation was allowed. 5768 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5769 return isa<LoadInst>(I) || 5770 (isa<StoreInst>(I) && 5771 NumPredStores > NumberOfStoresToPredicate); 5772 } 5773 5774 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5775 // If we aren't vectorizing the loop, or if we've already collected the 5776 // instructions to scalarize, there's nothing to do. Collection may already 5777 // have occurred if we have a user-selected VF and are now computing the 5778 // expected cost for interleaving. 
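  // (For illustration: with a user-selected VF of 4, the first call here
  // populates InstsToScalarize[4]; a later call for the same VF, made while
  // computing the expected cost for interleaving, then takes the early exit
  // below instead of re-analyzing the loop.)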
5779 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5780 return; 5781 5782 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 5783 // not profitable to scalarize any instructions, the presence of VF in the 5784 // map will indicate that we've analyzed it already. 5785 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5786 5787 // Find all the instructions that are scalar with predication in the loop and 5788 // determine if it would be better to not if-convert the blocks they are in. 5789 // If so, we also record the instructions to scalarize. 5790 for (BasicBlock *BB : TheLoop->blocks()) { 5791 if (!blockNeedsPredication(BB)) 5792 continue; 5793 for (Instruction &I : *BB) 5794 if (isScalarWithPredication(&I)) { 5795 ScalarCostsTy ScalarCosts; 5796 // Do not apply discount logic if hacked cost is needed 5797 // for emulated masked memrefs. 5798 if (!useEmulatedMaskMemRefHack(&I) && 5799 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5800 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5801 // Remember that BB will remain after vectorization. 5802 PredicatedBBsAfterVectorization.insert(BB); 5803 } 5804 } 5805 } 5806 5807 int LoopVectorizationCostModel::computePredInstDiscount( 5808 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5809 unsigned VF) { 5810 assert(!isUniformAfterVectorization(PredInst, VF) && 5811 "Instruction marked uniform-after-vectorization will be predicated"); 5812 5813 // Initialize the discount to zero, meaning that the scalar version and the 5814 // vector version cost the same. 5815 int Discount = 0; 5816 5817 // Holds instructions to analyze. The instructions we visit are mapped in 5818 // ScalarCosts. Those instructions are the ones that would be scalarized if 5819 // we find that the scalar version costs less. 5820 SmallVector<Instruction *, 8> Worklist; 5821 5822 // Returns true if the given instruction can be scalarized. 5823 auto canBeScalarized = [&](Instruction *I) -> bool { 5824 // We only attempt to scalarize instructions forming a single-use chain 5825 // from the original predicated block that would otherwise be vectorized. 5826 // Although not strictly necessary, we give up on instructions we know will 5827 // already be scalar to avoid traversing chains that are unlikely to be 5828 // beneficial. 5829 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5830 isScalarAfterVectorization(I, VF)) 5831 return false; 5832 5833 // If the instruction is scalar with predication, it will be analyzed 5834 // separately. We ignore it within the context of PredInst. 5835 if (isScalarWithPredication(I)) 5836 return false; 5837 5838 // If any of the instruction's operands are uniform after vectorization, 5839 // the instruction cannot be scalarized. This prevents, for example, a 5840 // masked load from being scalarized. 5841 // 5842 // We assume we will only emit a value for lane zero of an instruction 5843 // marked uniform after vectorization, rather than VF identical values. 5844 // Thus, if we scalarize an instruction that uses a uniform, we would 5845 // create uses of values corresponding to the lanes we aren't emitting code 5846 // for. This behavior can be changed by allowing getScalarValue to clone 5847 // the lane zero values for uniforms rather than asserting. 
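    // Illustrative example with hypothetical IR: if %ptr is uniform after
    // vectorization, only its lane-0 value is emitted; scalarizing a user
    // such as %v = load %ptr for all VF lanes would require lanes 1..VF-1 of
    // %ptr, which are never created, so such users are rejected here.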
5848 for (Use &U : I->operands()) 5849 if (auto *J = dyn_cast<Instruction>(U.get())) 5850 if (isUniformAfterVectorization(J, VF)) 5851 return false; 5852 5853 // Otherwise, we can scalarize the instruction. 5854 return true; 5855 }; 5856 5857 // Compute the expected cost discount from scalarizing the entire expression 5858 // feeding the predicated instruction. We currently only consider expressions 5859 // that are single-use instruction chains. 5860 Worklist.push_back(PredInst); 5861 while (!Worklist.empty()) { 5862 Instruction *I = Worklist.pop_back_val(); 5863 5864 // If we've already analyzed the instruction, there's nothing to do. 5865 if (ScalarCosts.find(I) != ScalarCosts.end()) 5866 continue; 5867 5868 // Compute the cost of the vector instruction. Note that this cost already 5869 // includes the scalarization overhead of the predicated instruction. 5870 unsigned VectorCost = getInstructionCost(I, VF).first; 5871 5872 // Compute the cost of the scalarized instruction. This cost is the cost of 5873 // the instruction as if it wasn't if-converted and instead remained in the 5874 // predicated block. We will scale this cost by block probability after 5875 // computing the scalarization overhead. 5876 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5877 5878 // Compute the scalarization overhead of needed insertelement instructions 5879 // and phi nodes. 5880 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5881 ScalarCost += TTI.getScalarizationOverhead( 5882 cast<VectorType>(ToVectorTy(I->getType(), VF)), 5883 APInt::getAllOnesValue(VF), true, false); 5884 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI, 5885 TTI::TCK_RecipThroughput); 5886 } 5887 5888 // Compute the scalarization overhead of needed extractelement 5889 // instructions. For each of the instruction's operands, if the operand can 5890 // be scalarized, add it to the worklist; otherwise, account for the 5891 // overhead. 5892 for (Use &U : I->operands()) 5893 if (auto *J = dyn_cast<Instruction>(U.get())) { 5894 assert(VectorType::isValidElementType(J->getType()) && 5895 "Instruction has non-scalar type"); 5896 if (canBeScalarized(J)) 5897 Worklist.push_back(J); 5898 else if (needsExtract(J, VF)) 5899 ScalarCost += TTI.getScalarizationOverhead( 5900 cast<VectorType>(ToVectorTy(J->getType(), VF)), 5901 APInt::getAllOnesValue(VF), false, true); 5902 } 5903 5904 // Scale the total scalar cost by block probability. 5905 ScalarCost /= getReciprocalPredBlockProb(); 5906 5907 // Compute the discount. A non-negative discount means the vector version 5908 // of the instruction costs more, and scalarizing would be beneficial. 5909 Discount += VectorCost - ScalarCost; 5910 ScalarCosts[I] = ScalarCost; 5911 } 5912 5913 return Discount; 5914 } 5915 5916 LoopVectorizationCostModel::VectorizationCostTy 5917 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5918 VectorizationCostTy Cost; 5919 5920 // For each block. 5921 for (BasicBlock *BB : TheLoop->blocks()) { 5922 VectorizationCostTy BlockCost; 5923 5924 // For each instruction in the old loop. 5925 for (Instruction &I : BB->instructionsWithoutDebug()) { 5926 // Skip ignored values. 5927 if (ValuesToIgnore.count(&I) || (VF > 1 && VecValuesToIgnore.count(&I))) 5928 continue; 5929 5930 VectorizationCostTy C = getInstructionCost(&I, VF); 5931 5932 // Check if we should override the cost. 
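      // (ForceTargetInstructionCost is a testing aid: when it is given on the
      // command line, every instruction costed here is assigned that constant.
      // For example, forcing a cost of 1 makes all instructions equally
      // expensive and isolates the surrounding heuristics from the target's
      // cost tables.)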
5933 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5934 C.first = ForceTargetInstructionCost; 5935 5936 BlockCost.first += C.first; 5937 BlockCost.second |= C.second; 5938 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5939 << " for VF " << VF << " For instruction: " << I 5940 << '\n'); 5941 } 5942 5943 // If we are vectorizing a predicated block, it will have been 5944 // if-converted. This means that the block's instructions (aside from 5945 // stores and instructions that may divide by zero) will now be 5946 // unconditionally executed. For the scalar case, we may not always execute 5947 // the predicated block. Thus, scale the block's cost by the probability of 5948 // executing it. 5949 if (VF == 1 && blockNeedsPredication(BB)) 5950 BlockCost.first /= getReciprocalPredBlockProb(); 5951 5952 Cost.first += BlockCost.first; 5953 Cost.second |= BlockCost.second; 5954 } 5955 5956 return Cost; 5957 } 5958 5959 /// Gets Address Access SCEV after verifying that the access pattern 5960 /// is loop invariant except the induction variable dependence. 5961 /// 5962 /// This SCEV can be sent to the Target in order to estimate the address 5963 /// calculation cost. 5964 static const SCEV *getAddressAccessSCEV( 5965 Value *Ptr, 5966 LoopVectorizationLegality *Legal, 5967 PredicatedScalarEvolution &PSE, 5968 const Loop *TheLoop) { 5969 5970 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5971 if (!Gep) 5972 return nullptr; 5973 5974 // We are looking for a gep with all loop invariant indices except for one 5975 // which should be an induction variable. 5976 auto SE = PSE.getSE(); 5977 unsigned NumOperands = Gep->getNumOperands(); 5978 for (unsigned i = 1; i < NumOperands; ++i) { 5979 Value *Opd = Gep->getOperand(i); 5980 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5981 !Legal->isInductionVariable(Opd)) 5982 return nullptr; 5983 } 5984 5985 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5986 return PSE.getSCEV(Ptr); 5987 } 5988 5989 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5990 return Legal->hasStride(I->getOperand(0)) || 5991 Legal->hasStride(I->getOperand(1)); 5992 } 5993 5994 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5995 unsigned VF) { 5996 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5997 Type *ValTy = getMemInstValueType(I); 5998 auto SE = PSE.getSE(); 5999 6000 unsigned AS = getLoadStoreAddressSpace(I); 6001 Value *Ptr = getLoadStorePointerOperand(I); 6002 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6003 6004 // Figure out whether the access is strided and get the stride value 6005 // if it's known in compile time 6006 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6007 6008 // Get the cost of the scalar memory instruction and address computation. 6009 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6010 6011 // Don't pass *I here, since it is scalar but will actually be part of a 6012 // vectorized loop where the user of it is a vectorized instruction. 6013 const Align Alignment = getLoadStoreAlignment(I); 6014 Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6015 Alignment, AS, 6016 TTI::TCK_RecipThroughput); 6017 6018 // Get the overhead of the extractelement and insertelement instructions 6019 // we might create due to scalarization. 
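  // (Putting the pieces together with hypothetical numbers: for VF = 4, a
  // scalarized load pays 4 address computations, 4 scalar loads, and the
  // insert/extract overhead added below; if the access is predicated, the
  // total is then divided by getReciprocalPredBlockProb() to reflect that the
  // predicated block does not execute on every iteration.)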
6020 Cost += getScalarizationOverhead(I, VF); 6021 6022 // If we have a predicated store, it may not be executed for each vector 6023 // lane. Scale the cost by the probability of executing the predicated 6024 // block. 6025 if (isPredicatedInst(I)) { 6026 Cost /= getReciprocalPredBlockProb(); 6027 6028 if (useEmulatedMaskMemRefHack(I)) 6029 // Artificially setting to a high enough value to practically disable 6030 // vectorization with such operations. 6031 Cost = 3000000; 6032 } 6033 6034 return Cost; 6035 } 6036 6037 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6038 unsigned VF) { 6039 Type *ValTy = getMemInstValueType(I); 6040 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6041 Value *Ptr = getLoadStorePointerOperand(I); 6042 unsigned AS = getLoadStoreAddressSpace(I); 6043 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 6044 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6045 6046 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6047 "Stride should be 1 or -1 for consecutive memory access"); 6048 const Align Alignment = getLoadStoreAlignment(I); 6049 unsigned Cost = 0; 6050 if (Legal->isMaskRequired(I)) 6051 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6052 CostKind); 6053 else 6054 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6055 CostKind, I); 6056 6057 bool Reverse = ConsecutiveStride < 0; 6058 if (Reverse) 6059 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6060 return Cost; 6061 } 6062 6063 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6064 unsigned VF) { 6065 Type *ValTy = getMemInstValueType(I); 6066 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6067 const Align Alignment = getLoadStoreAlignment(I); 6068 unsigned AS = getLoadStoreAddressSpace(I); 6069 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6070 if (isa<LoadInst>(I)) { 6071 return TTI.getAddressComputationCost(ValTy) + 6072 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6073 CostKind) + 6074 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6075 } 6076 StoreInst *SI = cast<StoreInst>(I); 6077 6078 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6079 return TTI.getAddressComputationCost(ValTy) + 6080 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6081 CostKind) + 6082 (isLoopInvariantStoreValue 6083 ? 
0 6084 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 6085 VF - 1)); 6086 } 6087 6088 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 6089 unsigned VF) { 6090 Type *ValTy = getMemInstValueType(I); 6091 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6092 const Align Alignment = getLoadStoreAlignment(I); 6093 const Value *Ptr = getLoadStorePointerOperand(I); 6094 6095 return TTI.getAddressComputationCost(VectorTy) + 6096 TTI.getGatherScatterOpCost( 6097 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 6098 TargetTransformInfo::TCK_RecipThroughput, I); 6099 } 6100 6101 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 6102 unsigned VF) { 6103 Type *ValTy = getMemInstValueType(I); 6104 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6105 unsigned AS = getLoadStoreAddressSpace(I); 6106 6107 auto Group = getInterleavedAccessGroup(I); 6108 assert(Group && "Fail to get an interleaved access group."); 6109 6110 unsigned InterleaveFactor = Group->getFactor(); 6111 auto *WideVecTy = FixedVectorType::get(ValTy, VF * InterleaveFactor); 6112 6113 // Holds the indices of existing members in an interleaved load group. 6114 // An interleaved store group doesn't need this as it doesn't allow gaps. 6115 SmallVector<unsigned, 4> Indices; 6116 if (isa<LoadInst>(I)) { 6117 for (unsigned i = 0; i < InterleaveFactor; i++) 6118 if (Group->getMember(i)) 6119 Indices.push_back(i); 6120 } 6121 6122 // Calculate the cost of the whole interleaved group. 6123 bool UseMaskForGaps = 6124 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 6125 unsigned Cost = TTI.getInterleavedMemoryOpCost( 6126 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 6127 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 6128 6129 if (Group->isReverse()) { 6130 // TODO: Add support for reversed masked interleaved access. 6131 assert(!Legal->isMaskRequired(I) && 6132 "Reverse masked interleaved access not supported."); 6133 Cost += Group->getNumMembers() * 6134 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 6135 } 6136 return Cost; 6137 } 6138 6139 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6140 unsigned VF) { 6141 // Calculate scalar cost only. Vectorization cost should be ready at this 6142 // moment. 6143 if (VF == 1) { 6144 Type *ValTy = getMemInstValueType(I); 6145 const Align Alignment = getLoadStoreAlignment(I); 6146 unsigned AS = getLoadStoreAddressSpace(I); 6147 6148 return TTI.getAddressComputationCost(ValTy) + 6149 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6150 TTI::TCK_RecipThroughput, I); 6151 } 6152 return getWideningCost(I, VF); 6153 } 6154 6155 LoopVectorizationCostModel::VectorizationCostTy 6156 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 6157 // If we know that this instruction will remain uniform, check the cost of 6158 // the scalar version. 6159 if (isUniformAfterVectorization(I, VF)) 6160 VF = 1; 6161 6162 if (VF > 1 && isProfitableToScalarize(I, VF)) 6163 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6164 6165 // Forced scalars do not have any scalarization overhead. 
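  // (E.g., an address computation that was forced to remain scalar for VF = 4
  // is costed below as 4 times its scalar cost, with no insertelement or
  // extractelement overhead added on top.)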
6166 auto ForcedScalar = ForcedScalars.find(VF); 6167 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 6168 auto InstSet = ForcedScalar->second; 6169 if (InstSet.count(I)) 6170 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 6171 } 6172 6173 Type *VectorTy; 6174 unsigned C = getInstructionCost(I, VF, VectorTy); 6175 6176 bool TypeNotScalarized = 6177 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 6178 return VectorizationCostTy(C, TypeNotScalarized); 6179 } 6180 6181 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6182 unsigned VF) { 6183 6184 if (VF == 1) 6185 return 0; 6186 6187 unsigned Cost = 0; 6188 Type *RetTy = ToVectorTy(I->getType(), VF); 6189 if (!RetTy->isVoidTy() && 6190 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6191 Cost += TTI.getScalarizationOverhead( 6192 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF), true, false); 6193 6194 // Some targets keep addresses scalar. 6195 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6196 return Cost; 6197 6198 // Some targets support efficient element stores. 6199 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6200 return Cost; 6201 6202 // Collect operands to consider. 6203 CallInst *CI = dyn_cast<CallInst>(I); 6204 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 6205 6206 // Skip operands that do not require extraction/scalarization and do not incur 6207 // any overhead. 6208 return Cost + TTI.getOperandsScalarizationOverhead( 6209 filterExtractingOperands(Ops, VF), VF); 6210 } 6211 6212 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 6213 if (VF == 1) 6214 return; 6215 NumPredStores = 0; 6216 for (BasicBlock *BB : TheLoop->blocks()) { 6217 // For each instruction in the old loop. 6218 for (Instruction &I : *BB) { 6219 Value *Ptr = getLoadStorePointerOperand(&I); 6220 if (!Ptr) 6221 continue; 6222 6223 // TODO: We should generate better code and update the cost model for 6224 // predicated uniform stores. Today they are treated as any other 6225 // predicated store (see added test cases in 6226 // invariant-store-vectorization.ll). 6227 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 6228 NumPredStores++; 6229 6230 if (Legal->isUniform(Ptr) && 6231 // Conditional loads and stores should be scalarized and predicated. 6232 // isScalarWithPredication cannot be used here since masked 6233 // gather/scatters are not considered scalar with predication. 6234 !Legal->blockNeedsPredication(I.getParent())) { 6235 // TODO: Avoid replicating loads and stores instead of 6236 // relying on instcombine to remove them. 6237 // Load: Scalar load + broadcast 6238 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6239 unsigned Cost = getUniformMemOpCost(&I, VF); 6240 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6241 continue; 6242 } 6243 6244 // We assume that widening is the best solution when possible. 6245 if (memoryInstructionCanBeWidened(&I, VF)) { 6246 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 6247 int ConsecutiveStride = 6248 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 6249 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6250 "Expected consecutive stride."); 6251 InstWidening Decision = 6252 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6253 setWideningDecision(&I, VF, Decision, Cost); 6254 continue; 6255 } 6256 6257 // Choose between Interleaving, Gather/Scatter or Scalarization. 
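      // (Illustrative decision with hypothetical costs: InterleaveCost = 8,
      // GatherScatterCost = 12, ScalarizationCost = 20 picks CM_Interleave;
      // if the access cannot be interleaved, InterleaveCost keeps its initial
      // maximum value and GatherScatterCost = 12 < 20 picks CM_GatherScatter;
      // otherwise the access is scalarized.)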
6258 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 6259 unsigned NumAccesses = 1; 6260 if (isAccessInterleaved(&I)) { 6261 auto Group = getInterleavedAccessGroup(&I); 6262 assert(Group && "Fail to get an interleaved access group."); 6263 6264 // Make one decision for the whole group. 6265 if (getWideningDecision(&I, VF) != CM_Unknown) 6266 continue; 6267 6268 NumAccesses = Group->getNumMembers(); 6269 if (interleavedAccessCanBeWidened(&I, VF)) 6270 InterleaveCost = getInterleaveGroupCost(&I, VF); 6271 } 6272 6273 unsigned GatherScatterCost = 6274 isLegalGatherOrScatter(&I) 6275 ? getGatherScatterCost(&I, VF) * NumAccesses 6276 : std::numeric_limits<unsigned>::max(); 6277 6278 unsigned ScalarizationCost = 6279 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6280 6281 // Choose better solution for the current VF, 6282 // write down this decision and use it during vectorization. 6283 unsigned Cost; 6284 InstWidening Decision; 6285 if (InterleaveCost <= GatherScatterCost && 6286 InterleaveCost < ScalarizationCost) { 6287 Decision = CM_Interleave; 6288 Cost = InterleaveCost; 6289 } else if (GatherScatterCost < ScalarizationCost) { 6290 Decision = CM_GatherScatter; 6291 Cost = GatherScatterCost; 6292 } else { 6293 Decision = CM_Scalarize; 6294 Cost = ScalarizationCost; 6295 } 6296 // If the instructions belongs to an interleave group, the whole group 6297 // receives the same decision. The whole group receives the cost, but 6298 // the cost will actually be assigned to one instruction. 6299 if (auto Group = getInterleavedAccessGroup(&I)) 6300 setWideningDecision(Group, VF, Decision, Cost); 6301 else 6302 setWideningDecision(&I, VF, Decision, Cost); 6303 } 6304 } 6305 6306 // Make sure that any load of address and any other address computation 6307 // remains scalar unless there is gather/scatter support. This avoids 6308 // inevitable extracts into address registers, and also has the benefit of 6309 // activating LSR more, since that pass can't optimize vectorized 6310 // addresses. 6311 if (TTI.prefersVectorizedAddressing()) 6312 return; 6313 6314 // Start with all scalar pointer uses. 6315 SmallPtrSet<Instruction *, 8> AddrDefs; 6316 for (BasicBlock *BB : TheLoop->blocks()) 6317 for (Instruction &I : *BB) { 6318 Instruction *PtrDef = 6319 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 6320 if (PtrDef && TheLoop->contains(PtrDef) && 6321 getWideningDecision(&I, VF) != CM_GatherScatter) 6322 AddrDefs.insert(PtrDef); 6323 } 6324 6325 // Add all instructions used to generate the addresses. 6326 SmallVector<Instruction *, 4> Worklist; 6327 for (auto *I : AddrDefs) 6328 Worklist.push_back(I); 6329 while (!Worklist.empty()) { 6330 Instruction *I = Worklist.pop_back_val(); 6331 for (auto &Op : I->operands()) 6332 if (auto *InstOp = dyn_cast<Instruction>(Op)) 6333 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 6334 AddrDefs.insert(InstOp).second) 6335 Worklist.push_back(InstOp); 6336 } 6337 6338 for (auto *I : AddrDefs) { 6339 if (isa<LoadInst>(I)) { 6340 // Setting the desired widening decision should ideally be handled in 6341 // by cost functions, but since this involves the task of finding out 6342 // if the loaded register is involved in an address computation, it is 6343 // instead changed here when we know this is the case. 6344 InstWidening Decision = getWideningDecision(I, VF); 6345 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 6346 // Scalarize a widened load of address. 
6347 setWideningDecision(I, VF, CM_Scalarize, 6348 (VF * getMemoryInstructionCost(I, 1))); 6349 else if (auto Group = getInterleavedAccessGroup(I)) { 6350 // Scalarize an interleave group of address loads. 6351 for (unsigned I = 0; I < Group->getFactor(); ++I) { 6352 if (Instruction *Member = Group->getMember(I)) 6353 setWideningDecision(Member, VF, CM_Scalarize, 6354 (VF * getMemoryInstructionCost(Member, 1))); 6355 } 6356 } 6357 } else 6358 // Make sure I gets scalarized and a cost estimate without 6359 // scalarization overhead. 6360 ForcedScalars[VF].insert(I); 6361 } 6362 } 6363 6364 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6365 unsigned VF, 6366 Type *&VectorTy) { 6367 Type *RetTy = I->getType(); 6368 if (canTruncateToMinimalBitwidth(I, VF)) 6369 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6370 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 6371 auto SE = PSE.getSE(); 6372 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6373 6374 // TODO: We need to estimate the cost of intrinsic calls. 6375 switch (I->getOpcode()) { 6376 case Instruction::GetElementPtr: 6377 // We mark this instruction as zero-cost because the cost of GEPs in 6378 // vectorized code depends on whether the corresponding memory instruction 6379 // is scalarized or not. Therefore, we handle GEPs with the memory 6380 // instruction cost. 6381 return 0; 6382 case Instruction::Br: { 6383 // In cases of scalarized and predicated instructions, there will be VF 6384 // predicated blocks in the vectorized loop. Each branch around these 6385 // blocks requires also an extract of its vector compare i1 element. 6386 bool ScalarPredicatedBB = false; 6387 BranchInst *BI = cast<BranchInst>(I); 6388 if (VF > 1 && BI->isConditional() && 6389 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 6390 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 6391 ScalarPredicatedBB = true; 6392 6393 if (ScalarPredicatedBB) { 6394 // Return cost for branches around scalarized and predicated blocks. 6395 auto *Vec_i1Ty = 6396 FixedVectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 6397 return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF), 6398 false, true) + 6399 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF)); 6400 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 6401 // The back-edge branch will remain, as will all scalar branches. 6402 return TTI.getCFInstrCost(Instruction::Br, CostKind); 6403 else 6404 // This branch will be eliminated by if-conversion. 6405 return 0; 6406 // Note: We currently assume zero cost for an unconditional branch inside 6407 // a predicated block since it will become a fall-through, although we 6408 // may decide in the future to call TTI for all branches. 6409 } 6410 case Instruction::PHI: { 6411 auto *Phi = cast<PHINode>(I); 6412 6413 // First-order recurrences are replaced by vector shuffles inside the loop. 6414 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 6415 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6416 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6417 cast<VectorType>(VectorTy), VF - 1, 6418 FixedVectorType::get(RetTy, 1)); 6419 6420 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 6421 // converted into select instructions. We require N - 1 selects per phi 6422 // node, where N is the number of incoming values. 
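    // (E.g., an if-converted phi with 3 incoming values becomes 2 vector
    // selects, so for VF > 1 it is costed as 2 times the cost of a select on
    // the phi's widened type.)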
6423 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 6424 return (Phi->getNumIncomingValues() - 1) * 6425 TTI.getCmpSelInstrCost( 6426 Instruction::Select, ToVectorTy(Phi->getType(), VF), 6427 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 6428 CostKind); 6429 6430 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 6431 } 6432 case Instruction::UDiv: 6433 case Instruction::SDiv: 6434 case Instruction::URem: 6435 case Instruction::SRem: 6436 // If we have a predicated instruction, it may not be executed for each 6437 // vector lane. Get the scalarization cost and scale this amount by the 6438 // probability of executing the predicated block. If the instruction is not 6439 // predicated, we fall through to the next case. 6440 if (VF > 1 && isScalarWithPredication(I)) { 6441 unsigned Cost = 0; 6442 6443 // These instructions have a non-void type, so account for the phi nodes 6444 // that we will create. This cost is likely to be zero. The phi node 6445 // cost, if any, should be scaled by the block probability because it 6446 // models a copy at the end of each predicated block. 6447 Cost += VF * TTI.getCFInstrCost(Instruction::PHI, CostKind); 6448 6449 // The cost of the non-predicated instruction. 6450 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 6451 6452 // The cost of insertelement and extractelement instructions needed for 6453 // scalarization. 6454 Cost += getScalarizationOverhead(I, VF); 6455 6456 // Scale the cost by the probability of executing the predicated blocks. 6457 // This assumes the predicated block for each vector lane is equally 6458 // likely. 6459 return Cost / getReciprocalPredBlockProb(); 6460 } 6461 LLVM_FALLTHROUGH; 6462 case Instruction::Add: 6463 case Instruction::FAdd: 6464 case Instruction::Sub: 6465 case Instruction::FSub: 6466 case Instruction::Mul: 6467 case Instruction::FMul: 6468 case Instruction::FDiv: 6469 case Instruction::FRem: 6470 case Instruction::Shl: 6471 case Instruction::LShr: 6472 case Instruction::AShr: 6473 case Instruction::And: 6474 case Instruction::Or: 6475 case Instruction::Xor: { 6476 // Since we will replace the stride by 1 the multiplication should go away. 6477 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6478 return 0; 6479 // Certain instructions can be cheaper to vectorize if they have a constant 6480 // second vector operand. One example of this are shifts on x86. 6481 Value *Op2 = I->getOperand(1); 6482 TargetTransformInfo::OperandValueProperties Op2VP; 6483 TargetTransformInfo::OperandValueKind Op2VK = 6484 TTI.getOperandInfo(Op2, Op2VP); 6485 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 6486 Op2VK = TargetTransformInfo::OK_UniformValue; 6487 6488 SmallVector<const Value *, 4> Operands(I->operand_values()); 6489 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6490 return N * TTI.getArithmeticInstrCost( 6491 I->getOpcode(), VectorTy, CostKind, 6492 TargetTransformInfo::OK_AnyValue, 6493 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 6494 } 6495 case Instruction::FNeg: { 6496 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6497 return N * TTI.getArithmeticInstrCost( 6498 I->getOpcode(), VectorTy, CostKind, 6499 TargetTransformInfo::OK_AnyValue, 6500 TargetTransformInfo::OK_AnyValue, 6501 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 6502 I->getOperand(0), I); 6503 } 6504 case Instruction::Select: { 6505 SelectInst *SI = cast<SelectInst>(I); 6506 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6507 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6508 Type *CondTy = SI->getCondition()->getType(); 6509 if (!ScalarCond) 6510 CondTy = FixedVectorType::get(CondTy, VF); 6511 6512 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 6513 CostKind, I); 6514 } 6515 case Instruction::ICmp: 6516 case Instruction::FCmp: { 6517 Type *ValTy = I->getOperand(0)->getType(); 6518 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6519 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 6520 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 6521 VectorTy = ToVectorTy(ValTy, VF); 6522 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind, 6523 I); 6524 } 6525 case Instruction::Store: 6526 case Instruction::Load: { 6527 unsigned Width = VF; 6528 if (Width > 1) { 6529 InstWidening Decision = getWideningDecision(I, Width); 6530 assert(Decision != CM_Unknown && 6531 "CM decision should be taken at this point"); 6532 if (Decision == CM_Scalarize) 6533 Width = 1; 6534 } 6535 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 6536 return getMemoryInstructionCost(I, VF); 6537 } 6538 case Instruction::ZExt: 6539 case Instruction::SExt: 6540 case Instruction::FPToUI: 6541 case Instruction::FPToSI: 6542 case Instruction::FPExt: 6543 case Instruction::PtrToInt: 6544 case Instruction::IntToPtr: 6545 case Instruction::SIToFP: 6546 case Instruction::UIToFP: 6547 case Instruction::Trunc: 6548 case Instruction::FPTrunc: 6549 case Instruction::BitCast: { 6550 // Computes the CastContextHint from a Load/Store instruction. 6551 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 6552 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 6553 "Expected a load or a store!"); 6554 6555 if (VF == 1 || !TheLoop->contains(I)) 6556 return TTI::CastContextHint::Normal; 6557 6558 switch (getWideningDecision(I, VF)) { 6559 case LoopVectorizationCostModel::CM_GatherScatter: 6560 return TTI::CastContextHint::GatherScatter; 6561 case LoopVectorizationCostModel::CM_Interleave: 6562 return TTI::CastContextHint::Interleave; 6563 case LoopVectorizationCostModel::CM_Scalarize: 6564 case LoopVectorizationCostModel::CM_Widen: 6565 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 6566 : TTI::CastContextHint::Normal; 6567 case LoopVectorizationCostModel::CM_Widen_Reverse: 6568 return TTI::CastContextHint::Reversed; 6569 case LoopVectorizationCostModel::CM_Unknown: 6570 llvm_unreachable("Instr did not go through cost modelling?"); 6571 } 6572 6573 llvm_unreachable("Unhandled case!"); 6574 }; 6575 6576 unsigned Opcode = I->getOpcode(); 6577 TTI::CastContextHint CCH = TTI::CastContextHint::None; 6578 // For Trunc, the context is the only user, which must be a StoreInst. 6579 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 6580 if (I->hasOneUse()) 6581 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 6582 CCH = ComputeCCH(Store); 6583 } 6584 // For Z/Sext, the context is the operand, which must be a LoadInst. 
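    // (E.g., a zext whose operand is a load that the cost model decided to
    // widen in reverse order gets CastContextHint::Reversed from ComputeCCH,
    // letting the target price the combined reversed-load-plus-extend
    // pattern.)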
6585 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 6586 Opcode == Instruction::FPExt) { 6587 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 6588 CCH = ComputeCCH(Load); 6589 } 6590 6591 // We optimize the truncation of induction variables having constant 6592 // integer steps. The cost of these truncations is the same as the scalar 6593 // operation. 6594 if (isOptimizableIVTruncate(I, VF)) { 6595 auto *Trunc = cast<TruncInst>(I); 6596 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 6597 Trunc->getSrcTy(), CCH, CostKind, Trunc); 6598 } 6599 6600 Type *SrcScalarTy = I->getOperand(0)->getType(); 6601 Type *SrcVecTy = 6602 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6603 if (canTruncateToMinimalBitwidth(I, VF)) { 6604 // This cast is going to be shrunk. This may remove the cast or it might 6605 // turn it into slightly different cast. For example, if MinBW == 16, 6606 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6607 // 6608 // Calculate the modified src and dest types. 6609 Type *MinVecTy = VectorTy; 6610 if (Opcode == Instruction::Trunc) { 6611 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6612 VectorTy = 6613 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6614 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 6615 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6616 VectorTy = 6617 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6618 } 6619 } 6620 6621 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6622 return N * 6623 TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 6624 } 6625 case Instruction::Call: { 6626 bool NeedToScalarize; 6627 CallInst *CI = cast<CallInst>(I); 6628 unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 6629 if (getVectorIntrinsicIDForCall(CI, TLI)) 6630 return std::min(CallCost, getVectorIntrinsicCost(CI, VF)); 6631 return CallCost; 6632 } 6633 default: 6634 // The cost of executing VF copies of the scalar instruction. This opcode 6635 // is unknown. Assume that it is the same as 'mul'. 6636 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, 6637 CostKind) + 6638 getScalarizationOverhead(I, VF); 6639 } // end of switch. 
6640 } 6641 6642 char LoopVectorize::ID = 0; 6643 6644 static const char lv_name[] = "Loop Vectorization"; 6645 6646 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6647 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6648 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6649 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6650 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6651 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6652 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6653 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6654 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6655 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6656 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6657 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6658 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6659 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 6660 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 6661 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6662 6663 namespace llvm { 6664 6665 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 6666 6667 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6668 bool VectorizeOnlyWhenForced) { 6669 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6670 } 6671 6672 } // end namespace llvm 6673 6674 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6675 // Check if the pointer operand of a load or store instruction is 6676 // consecutive. 6677 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6678 return Legal->isConsecutivePtr(Ptr); 6679 return false; 6680 } 6681 6682 void LoopVectorizationCostModel::collectValuesToIgnore() { 6683 // Ignore ephemeral values. 6684 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6685 6686 // Ignore type-promoting instructions we identified during reduction 6687 // detection. 6688 for (auto &Reduction : Legal->getReductionVars()) { 6689 RecurrenceDescriptor &RedDes = Reduction.second; 6690 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6691 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6692 } 6693 // Ignore type-casting instructions we identified during induction 6694 // detection. 6695 for (auto &Induction : Legal->getInductionVars()) { 6696 InductionDescriptor &IndDes = Induction.second; 6697 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6698 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6699 } 6700 } 6701 6702 void LoopVectorizationCostModel::collectInLoopReductions() { 6703 // For the moment, without predicated reduction instructions, we do not 6704 // support inloop reductions whilst folding the tail, and hence in those cases 6705 // all reductions are currently out of the loop. 6706 if (!PreferInLoopReductions || foldTailByMasking()) 6707 return; 6708 6709 for (auto &Reduction : Legal->getReductionVars()) { 6710 PHINode *Phi = Reduction.first; 6711 RecurrenceDescriptor &RdxDesc = Reduction.second; 6712 6713 // We don't collect reductions that are type promoted (yet). 6714 if (RdxDesc.getRecurrenceType() != Phi->getType()) 6715 continue; 6716 6717 // Check that we can correctly put the reductions into the loop, by 6718 // finding the chain of operations that leads from the phi to the loop 6719 // exit value. 
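    // (For example, for an integer add reduction whose only in-loop update is
    // %sum.next = add %sum, %x (hypothetical names, for illustration only),
    // getReductionOpChain returns that single add, and the chain recorded
    // below allows the reduction to be kept in-loop rather than widened into
    // a vector accumulator.)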
6720 SmallVector<Instruction *, 4> ReductionOperations = 6721 RdxDesc.getReductionOpChain(Phi, TheLoop); 6722 bool InLoop = !ReductionOperations.empty(); 6723 if (InLoop) 6724 InLoopReductionChains[Phi] = ReductionOperations; 6725 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 6726 << " reduction for phi: " << *Phi << "\n"); 6727 } 6728 } 6729 6730 // TODO: we could return a pair of values that specify the max VF and 6731 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 6732 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 6733 // doesn't have a cost model that can choose which plan to execute if 6734 // more than one is generated. 6735 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 6736 LoopVectorizationCostModel &CM) { 6737 unsigned WidestType; 6738 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 6739 return WidestVectorRegBits / WidestType; 6740 } 6741 6742 VectorizationFactor 6743 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) { 6744 unsigned VF = UserVF; 6745 // Outer loop handling: They may require CFG and instruction level 6746 // transformations before even evaluating whether vectorization is profitable. 6747 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6748 // the vectorization pipeline. 6749 if (!OrigLoop->empty()) { 6750 // If the user doesn't provide a vectorization factor, determine a 6751 // reasonable one. 6752 if (!UserVF) { 6753 VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM); 6754 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 6755 6756 // Make sure we have a VF > 1 for stress testing. 6757 if (VPlanBuildStressTest && VF < 2) { 6758 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 6759 << "overriding computed VF.\n"); 6760 VF = 4; 6761 } 6762 } 6763 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6764 assert(isPowerOf2_32(VF) && "VF needs to be a power of two"); 6765 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF 6766 << " to build VPlans.\n"); 6767 buildVPlans(VF, VF); 6768 6769 // For VPlan build stress testing, we bail out after VPlan construction. 6770 if (VPlanBuildStressTest) 6771 return VectorizationFactor::Disabled(); 6772 6773 return {VF, 0}; 6774 } 6775 6776 LLVM_DEBUG( 6777 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6778 "VPlan-native path.\n"); 6779 return VectorizationFactor::Disabled(); 6780 } 6781 6782 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF, 6783 unsigned UserIC) { 6784 assert(OrigLoop->empty() && "Inner loop expected."); 6785 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(UserVF, UserIC); 6786 if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved. 6787 return None; 6788 6789 // Invalidate interleave groups if all blocks of loop will be predicated. 6790 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6791 !useMaskedInterleavedAccesses(*TTI)) { 6792 LLVM_DEBUG( 6793 dbgs() 6794 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6795 "which requires masked-interleaved support.\n"); 6796 if (CM.InterleaveInfo.invalidateGroups()) 6797 // Invalidating interleave groups also requires invalidating all decisions 6798 // based on them, which includes widening decisions and uniform and scalar 6799 // values. 
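      // (E.g., a load that had been marked CM_Interleave for some VF must be
      // re-decided once its group is discarded; keeping the stale decision
      // would let code generation query an interleave group that no longer
      // exists.)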
6800 CM.invalidateCostModelingDecisions(); 6801 } 6802 6803 if (UserVF) { 6804 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6805 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6806 // Collect the instructions (and their associated costs) that will be more 6807 // profitable to scalarize. 6808 CM.selectUserVectorizationFactor(UserVF); 6809 CM.collectInLoopReductions(); 6810 buildVPlansWithVPRecipes(UserVF, UserVF); 6811 LLVM_DEBUG(printPlans(dbgs())); 6812 return {{UserVF, 0}}; 6813 } 6814 6815 unsigned MaxVF = MaybeMaxVF.getValue(); 6816 assert(MaxVF != 0 && "MaxVF is zero."); 6817 6818 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6819 // Collect Uniform and Scalar instructions after vectorization with VF. 6820 CM.collectUniformsAndScalars(VF); 6821 6822 // Collect the instructions (and their associated costs) that will be more 6823 // profitable to scalarize. 6824 if (VF > 1) 6825 CM.collectInstsToScalarize(VF); 6826 } 6827 6828 CM.collectInLoopReductions(); 6829 6830 buildVPlansWithVPRecipes(1, MaxVF); 6831 LLVM_DEBUG(printPlans(dbgs())); 6832 if (MaxVF == 1) 6833 return VectorizationFactor::Disabled(); 6834 6835 // Select the optimal vectorization factor. 6836 return CM.selectVectorizationFactor(MaxVF); 6837 } 6838 6839 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6840 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6841 << '\n'); 6842 BestVF = VF; 6843 BestUF = UF; 6844 6845 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6846 return !Plan->hasVF(VF); 6847 }); 6848 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6849 } 6850 6851 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6852 DominatorTree *DT) { 6853 // Perform the actual loop transformation. 6854 6855 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6856 VPCallbackILV CallbackILV(ILV); 6857 6858 VPTransformState State{BestVF, BestUF, LI, 6859 DT, ILV.Builder, ILV.VectorLoopValueMap, 6860 &ILV, CallbackILV}; 6861 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6862 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6863 State.CanonicalIV = ILV.Induction; 6864 6865 //===------------------------------------------------===// 6866 // 6867 // Notice: any optimization or new instruction that go 6868 // into the code below should also be implemented in 6869 // the cost-model. 6870 // 6871 //===------------------------------------------------===// 6872 6873 // 2. Copy and widen instructions from the old loop into the new loop. 6874 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6875 VPlans.front()->execute(&State); 6876 6877 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6878 // predication, updating analyses. 6879 ILV.fixVectorizedLoop(); 6880 } 6881 6882 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6883 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6884 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6885 6886 // We create new control-flow for the vectorized loop, so the original 6887 // condition will be dead after vectorization if it's only used by the 6888 // branch. 6889 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6890 if (Cmp && Cmp->hasOneUse()) 6891 DeadInstructions.insert(Cmp); 6892 6893 // We create new "steps" for induction variable updates to which the original 6894 // induction variables map. 
An original update instruction will be dead if 6895 // all its users except the induction variable are dead. 6896 for (auto &Induction : Legal->getInductionVars()) { 6897 PHINode *Ind = Induction.first; 6898 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6899 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6900 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 6901 })) 6902 DeadInstructions.insert(IndUpdate); 6903 6904 // We record as "Dead" also the type-casting instructions we had identified 6905 // during induction analysis. We don't need any handling for them in the 6906 // vectorized loop because we have proven that, under a proper runtime 6907 // test guarding the vectorized loop, the value of the phi, and the casted 6908 // value of the phi, are the same. The last instruction in this casting chain 6909 // will get its scalar/vector/widened def from the scalar/vector/widened def 6910 // of the respective phi node. Any other casts in the induction def-use chain 6911 // have no other uses outside the phi update chain, and will be ignored. 6912 InductionDescriptor &IndDes = Induction.second; 6913 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6914 DeadInstructions.insert(Casts.begin(), Casts.end()); 6915 } 6916 } 6917 6918 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6919 6920 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6921 6922 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6923 Instruction::BinaryOps BinOp) { 6924 // When unrolling and the VF is 1, we only need to add a simple scalar. 6925 Type *Ty = Val->getType(); 6926 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6927 6928 if (Ty->isFloatingPointTy()) { 6929 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6930 6931 // Floating point operations had to be 'fast' to enable the unrolling. 6932 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6933 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6934 } 6935 Constant *C = ConstantInt::get(Ty, StartIdx); 6936 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6937 } 6938 6939 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6940 SmallVector<Metadata *, 4> MDs; 6941 // Reserve first location for self reference to the LoopID metadata node. 6942 MDs.push_back(nullptr); 6943 bool IsUnrollMetadata = false; 6944 MDNode *LoopID = L->getLoopID(); 6945 if (LoopID) { 6946 // First find existing loop unrolling disable metadata. 6947 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6948 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6949 if (MD) { 6950 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6951 IsUnrollMetadata = 6952 S && S->getString().startswith("llvm.loop.unroll.disable"); 6953 } 6954 MDs.push_back(LoopID->getOperand(i)); 6955 } 6956 } 6957 6958 if (!IsUnrollMetadata) { 6959 // Add runtime unroll disable metadata. 6960 LLVMContext &Context = L->getHeader()->getContext(); 6961 SmallVector<Metadata *, 1> DisableOperands; 6962 DisableOperands.push_back( 6963 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6964 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6965 MDs.push_back(DisableNode); 6966 MDNode *NewLoopID = MDNode::get(Context, MDs); 6967 // Set operand 0 to refer to the loop id itself. 
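    // (Loop ID metadata conventionally uses such a self-reference as its first
    // operand; the self-reference keeps each loop's ID node distinct even when
    // the remaining operands would otherwise be identical.)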
6968 NewLoopID->replaceOperandWith(0, NewLoopID); 6969 L->setLoopID(NewLoopID); 6970 } 6971 } 6972 6973 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6974 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6975 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6976 bool PredicateAtRangeStart = Predicate(Range.Start); 6977 6978 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6979 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6980 Range.End = TmpVF; 6981 break; 6982 } 6983 6984 return PredicateAtRangeStart; 6985 } 6986 6987 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6988 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6989 /// of VF's starting at a given VF and extending it as much as possible. Each 6990 /// vectorization decision can potentially shorten this sub-range during 6991 /// buildVPlan(). 6992 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6993 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6994 VFRange SubRange = {VF, MaxVF + 1}; 6995 VPlans.push_back(buildVPlan(SubRange)); 6996 VF = SubRange.End; 6997 } 6998 } 6999 7000 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 7001 VPlanPtr &Plan) { 7002 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 7003 7004 // Look for cached value. 7005 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 7006 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 7007 if (ECEntryIt != EdgeMaskCache.end()) 7008 return ECEntryIt->second; 7009 7010 VPValue *SrcMask = createBlockInMask(Src, Plan); 7011 7012 // The terminator has to be a branch inst! 7013 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 7014 assert(BI && "Unexpected terminator found"); 7015 7016 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 7017 return EdgeMaskCache[Edge] = SrcMask; 7018 7019 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 7020 assert(EdgeMask && "No Edge Mask found for condition"); 7021 7022 if (BI->getSuccessor(0) != Dst) 7023 EdgeMask = Builder.createNot(EdgeMask); 7024 7025 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 7026 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 7027 7028 return EdgeMaskCache[Edge] = EdgeMask; 7029 } 7030 7031 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 7032 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 7033 7034 // Look for cached value. 7035 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 7036 if (BCEntryIt != BlockMaskCache.end()) 7037 return BCEntryIt->second; 7038 7039 // All-one mask is modelled as no-mask following the convention for masked 7040 // load/store/gather/scatter. Initialize BlockMask to no-mask. 7041 VPValue *BlockMask = nullptr; 7042 7043 if (OrigLoop->getHeader() == BB) { 7044 if (!CM.blockNeedsPredication(BB)) 7045 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 7046 7047 // Introduce the early-exit compare IV <= BTC to form header block mask. 7048 // This is used instead of IV < TC because TC may wrap, unlike BTC. 7049 // Start by constructing the desired canonical IV. 
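    // (For example, with a trip count of 10 and VF 4, the third vector
    // iteration compares lanes {8,9,10,11} against BTC = 9, yielding the mask
    // <1,1,0,0> and disabling the two lanes beyond the trip count.)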
7050 VPValue *IV = nullptr; 7051 if (Legal->getPrimaryInduction()) 7052 IV = Plan->getVPValue(Legal->getPrimaryInduction()); 7053 else { 7054 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 7055 Builder.getInsertBlock()->appendRecipe(IVRecipe); 7056 IV = IVRecipe->getVPValue(); 7057 } 7058 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 7059 bool TailFolded = !CM.isScalarEpilogueAllowed(); 7060 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) 7061 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, BTC}); 7062 else 7063 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 7064 return BlockMaskCache[BB] = BlockMask; 7065 } 7066 7067 // This is the block mask. We OR all incoming edges. 7068 for (auto *Predecessor : predecessors(BB)) { 7069 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 7070 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 7071 return BlockMaskCache[BB] = EdgeMask; 7072 7073 if (!BlockMask) { // BlockMask has its initialized nullptr value. 7074 BlockMask = EdgeMask; 7075 continue; 7076 } 7077 7078 BlockMask = Builder.createOr(BlockMask, EdgeMask); 7079 } 7080 7081 return BlockMaskCache[BB] = BlockMask; 7082 } 7083 7084 VPWidenMemoryInstructionRecipe * 7085 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 7086 VPlanPtr &Plan) { 7087 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7088 "Must be called with either a load or store"); 7089 7090 auto willWiden = [&](unsigned VF) -> bool { 7091 if (VF == 1) 7092 return false; 7093 LoopVectorizationCostModel::InstWidening Decision = 7094 CM.getWideningDecision(I, VF); 7095 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 7096 "CM decision should be taken at this point."); 7097 if (Decision == LoopVectorizationCostModel::CM_Interleave) 7098 return true; 7099 if (CM.isScalarAfterVectorization(I, VF) || 7100 CM.isProfitableToScalarize(I, VF)) 7101 return false; 7102 return Decision != LoopVectorizationCostModel::CM_Scalarize; 7103 }; 7104 7105 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 7106 return nullptr; 7107 7108 VPValue *Mask = nullptr; 7109 if (Legal->isMaskRequired(I)) 7110 Mask = createBlockInMask(I->getParent(), Plan); 7111 7112 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 7113 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 7114 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 7115 7116 StoreInst *Store = cast<StoreInst>(I); 7117 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 7118 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 7119 } 7120 7121 VPWidenIntOrFpInductionRecipe * 7122 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const { 7123 // Check if this is an integer or fp induction. If so, build the recipe that 7124 // produces its scalar and vector values. 7125 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 7126 if (II.getKind() == InductionDescriptor::IK_IntInduction || 7127 II.getKind() == InductionDescriptor::IK_FpInduction) 7128 return new VPWidenIntOrFpInductionRecipe(Phi); 7129 7130 return nullptr; 7131 } 7132 7133 VPWidenIntOrFpInductionRecipe * 7134 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, 7135 VFRange &Range) const { 7136 // Optimize the special case where the source is a constant integer 7137 // induction variable. 
Notice that we can only optimize the 'trunc' case 7138 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 7139 // (c) other casts depend on pointer size. 7140 7141 // Determine whether \p K is a truncation based on an induction variable that 7142 // can be optimized. 7143 auto isOptimizableIVTruncate = 7144 [&](Instruction *K) -> std::function<bool(unsigned)> { 7145 return 7146 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 7147 }; 7148 7149 if (LoopVectorizationPlanner::getDecisionAndClampRange( 7150 isOptimizableIVTruncate(I), Range)) 7151 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 7152 I); 7153 return nullptr; 7154 } 7155 7156 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) { 7157 // We know that all PHIs in non-header blocks are converted into selects, so 7158 // we don't have to worry about the insertion order and we can just use the 7159 // builder. At this point we generate the predication tree. There may be 7160 // duplications since this is a simple recursive scan, but future 7161 // optimizations will clean it up. 7162 7163 SmallVector<VPValue *, 2> Operands; 7164 unsigned NumIncoming = Phi->getNumIncomingValues(); 7165 for (unsigned In = 0; In < NumIncoming; In++) { 7166 VPValue *EdgeMask = 7167 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 7168 assert((EdgeMask || NumIncoming == 1) && 7169 "Multiple predecessors with one having a full mask"); 7170 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In))); 7171 if (EdgeMask) 7172 Operands.push_back(EdgeMask); 7173 } 7174 return new VPBlendRecipe(Phi, Operands); 7175 } 7176 7177 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range, 7178 VPlan &Plan) const { 7179 7180 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 7181 [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); }, 7182 Range); 7183 7184 if (IsPredicated) 7185 return nullptr; 7186 7187 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 7188 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 7189 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 7190 return nullptr; 7191 7192 auto willWiden = [&](unsigned VF) -> bool { 7193 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 7194 // The following case may be scalarized depending on the VF. 7195 // The flag shows whether we use Intrinsic or a usual Call for vectorized 7196 // version of the instruction. 7197 // Is it beneficial to perform intrinsic call compared to lib call? 7198 bool NeedToScalarize = false; 7199 unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 7200 bool UseVectorIntrinsic = 7201 ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost; 7202 return UseVectorIntrinsic || !NeedToScalarize; 7203 }; 7204 7205 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 7206 return nullptr; 7207 7208 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 7209 } 7210 7211 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 7212 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 7213 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 7214 // Instruction should be widened, unless it is scalar after vectorization, 7215 // scalarization is profitable or it is predicated. 
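  // getDecisionAndClampRange clamps Range to the largest sub-range over which
  // the scalarization answer stays the same, so a single decision (and recipe)
  // covers every VF remaining in the range.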
7216 auto WillScalarize = [this, I](unsigned VF) -> bool { 7217 return CM.isScalarAfterVectorization(I, VF) || 7218 CM.isProfitableToScalarize(I, VF) || 7219 CM.isScalarWithPredication(I, VF); 7220 }; 7221 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 7222 Range); 7223 } 7224 7225 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 7226 auto IsVectorizableOpcode = [](unsigned Opcode) { 7227 switch (Opcode) { 7228 case Instruction::Add: 7229 case Instruction::And: 7230 case Instruction::AShr: 7231 case Instruction::BitCast: 7232 case Instruction::FAdd: 7233 case Instruction::FCmp: 7234 case Instruction::FDiv: 7235 case Instruction::FMul: 7236 case Instruction::FNeg: 7237 case Instruction::FPExt: 7238 case Instruction::FPToSI: 7239 case Instruction::FPToUI: 7240 case Instruction::FPTrunc: 7241 case Instruction::FRem: 7242 case Instruction::FSub: 7243 case Instruction::ICmp: 7244 case Instruction::IntToPtr: 7245 case Instruction::LShr: 7246 case Instruction::Mul: 7247 case Instruction::Or: 7248 case Instruction::PtrToInt: 7249 case Instruction::SDiv: 7250 case Instruction::Select: 7251 case Instruction::SExt: 7252 case Instruction::Shl: 7253 case Instruction::SIToFP: 7254 case Instruction::SRem: 7255 case Instruction::Sub: 7256 case Instruction::Trunc: 7257 case Instruction::UDiv: 7258 case Instruction::UIToFP: 7259 case Instruction::URem: 7260 case Instruction::Xor: 7261 case Instruction::ZExt: 7262 return true; 7263 } 7264 return false; 7265 }; 7266 7267 if (!IsVectorizableOpcode(I->getOpcode())) 7268 return nullptr; 7269 7270 // Success: widen this instruction. 7271 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 7272 } 7273 7274 VPBasicBlock *VPRecipeBuilder::handleReplication( 7275 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 7276 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 7277 VPlanPtr &Plan) { 7278 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 7279 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 7280 Range); 7281 7282 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 7283 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 7284 7285 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 7286 IsUniform, IsPredicated); 7287 setRecipe(I, Recipe); 7288 7289 // Find if I uses a predicated instruction. If so, it will use its scalar 7290 // value. Avoid hoisting the insert-element which packs the scalar value into 7291 // a vector value, as that happens iff all users use the vector value. 7292 for (auto &Op : I->operands()) 7293 if (auto *PredInst = dyn_cast<Instruction>(Op)) 7294 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 7295 PredInst2Recipe[PredInst]->setAlsoPack(false); 7296 7297 // Finalize the recipe for Instr, first if it is not predicated. 7298 if (!IsPredicated) { 7299 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 7300 VPBB->appendRecipe(Recipe); 7301 return VPBB; 7302 } 7303 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 7304 assert(VPBB->getSuccessors().empty() && 7305 "VPBB has successors when handling predicated replication."); 7306 // Record predicated instructions for above packing optimizations. 
7307 PredInst2Recipe[I] = Recipe; 7308 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 7309 VPBlockUtils::insertBlockAfter(Region, VPBB); 7310 auto *RegSucc = new VPBasicBlock(); 7311 VPBlockUtils::insertBlockAfter(RegSucc, Region); 7312 return RegSucc; 7313 } 7314 7315 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 7316 VPRecipeBase *PredRecipe, 7317 VPlanPtr &Plan) { 7318 // Instructions marked for predication are replicated and placed under an 7319 // if-then construct to prevent side-effects. 7320 7321 // Generate recipes to compute the block mask for this region. 7322 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 7323 7324 // Build the triangular if-then region. 7325 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 7326 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 7327 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 7328 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 7329 auto *PHIRecipe = 7330 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 7331 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 7332 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 7333 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 7334 7335 // Note: first set Entry as region entry and then connect successors starting 7336 // from it in order, to propagate the "parent" of each VPBasicBlock. 7337 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 7338 VPBlockUtils::connectBlocks(Pred, Exit); 7339 7340 return Region; 7341 } 7342 7343 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 7344 VFRange &Range, 7345 VPlanPtr &Plan) { 7346 // First, check for specific widening recipes that deal with calls, memory 7347 // operations, inductions and Phi nodes. 7348 if (auto *CI = dyn_cast<CallInst>(Instr)) 7349 return tryToWidenCall(CI, Range, *Plan); 7350 7351 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 7352 return tryToWidenMemory(Instr, Range, Plan); 7353 7354 VPRecipeBase *Recipe; 7355 if (auto Phi = dyn_cast<PHINode>(Instr)) { 7356 if (Phi->getParent() != OrigLoop->getHeader()) 7357 return tryToBlend(Phi, Plan); 7358 if ((Recipe = tryToOptimizeInductionPHI(Phi))) 7359 return Recipe; 7360 return new VPWidenPHIRecipe(Phi); 7361 } 7362 7363 if (isa<TruncInst>(Instr) && 7364 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range))) 7365 return Recipe; 7366 7367 if (!shouldWiden(Instr, Range)) 7368 return nullptr; 7369 7370 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 7371 return new VPWidenGEPRecipe(GEP, Plan->mapToVPValues(GEP->operands()), 7372 OrigLoop); 7373 7374 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 7375 bool InvariantCond = 7376 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 7377 return new VPWidenSelectRecipe(*SI, Plan->mapToVPValues(SI->operands()), 7378 InvariantCond); 7379 } 7380 7381 return tryToWiden(Instr, *Plan); 7382 } 7383 7384 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF, 7385 unsigned MaxVF) { 7386 assert(OrigLoop->empty() && "Inner loop expected."); 7387 7388 // Collect conditions feeding internal conditional branches; they need to be 7389 // represented in VPlan for it to model masking. 
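  // E.g., in a body like 'if (a[i] > 0) b[i] = 1;' (an illustrative example),
  // the compare feeding the branch becomes an operand of edge and block masks,
  // so it needs a VPValue definition in the plan.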
7390   SmallPtrSet<Value *, 1> NeedDef;
7391 
7392   auto *Latch = OrigLoop->getLoopLatch();
7393   for (BasicBlock *BB : OrigLoop->blocks()) {
7394     if (BB == Latch)
7395       continue;
7396     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7397     if (Branch && Branch->isConditional())
7398       NeedDef.insert(Branch->getCondition());
7399   }
7400 
7401   // If the tail is to be folded by masking, the primary induction variable, if
7402   // it exists, needs to be represented in VPlan for it to model early-exit masking.
7403   // Also, both the Phi and the live-out instruction of each reduction are
7404   // required in order to introduce a select between them in VPlan.
7405   if (CM.foldTailByMasking()) {
7406     if (Legal->getPrimaryInduction())
7407       NeedDef.insert(Legal->getPrimaryInduction());
7408     for (auto &Reduction : Legal->getReductionVars()) {
7409       NeedDef.insert(Reduction.first);
7410       NeedDef.insert(Reduction.second.getLoopExitInstr());
7411     }
7412   }
7413 
7414   // Collect instructions from the original loop that will become trivially dead
7415   // in the vectorized loop. We don't need to vectorize these instructions. For
7416   // example, original induction update instructions can become dead because we
7417   // separately emit induction "steps" when generating code for the new loop.
7418   // Similarly, we create a new latch condition when setting up the structure
7419   // of the new loop, so the old one can become dead.
7420   SmallPtrSet<Instruction *, 4> DeadInstructions;
7421   collectTriviallyDeadInstructions(DeadInstructions);
7422 
7423   // Add assume instructions we need to drop to DeadInstructions, to prevent
7424   // them from being added to the VPlan.
7425   // TODO: We only need to drop assumes in blocks that get flattened. If the
7426   // control flow is preserved, we should keep them.
7427   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7428   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7429 
7430   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7431   // Dead instructions do not need sinking. Remove them from SinkAfter.
7432   for (Instruction *I : DeadInstructions)
7433     SinkAfter.erase(I);
7434 
7435   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7436     VFRange SubRange = {VF, MaxVF + 1};
7437     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7438                                              DeadInstructions, SinkAfter));
7439     VF = SubRange.End;
7440   }
7441 }
7442 
7443 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7444     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7445     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7446     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7447 
7448   // Hold a mapping from predicated instructions to their recipes, in order to
7449   // fix their AlsoPack behavior if a user is determined to replicate and use a
7450   // scalar instead of a vector value.
7451   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7452 
7453   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7454 
7455   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7456 
7457   // ---------------------------------------------------------------------------
7458   // Pre-construction: record ingredients whose recipes we'll need to further
7459   // process after constructing the initial VPlan.
7460   // ---------------------------------------------------------------------------
7461 
7462   // Mark instructions we'll need to sink later and their targets as
7463   // ingredients whose recipe we'll need to record.
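  // Sink-after constraints typically come from first-order recurrences, where a
  // user of the recurrence phi must be re-ordered to after the instruction that
  // produces the previous iteration's value.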
7464   for (auto &Entry : SinkAfter) {
7465     RecipeBuilder.recordRecipeOf(Entry.first);
7466     RecipeBuilder.recordRecipeOf(Entry.second);
7467   }
7468   for (auto &Reduction : CM.getInLoopReductionChains()) {
7469     PHINode *Phi = Reduction.first;
7470     RecurrenceDescriptor::RecurrenceKind Kind =
7471         Legal->getReductionVars()[Phi].getRecurrenceKind();
7472     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
7473 
7474     RecipeBuilder.recordRecipeOf(Phi);
7475     for (auto &R : ReductionOperations) {
7476       RecipeBuilder.recordRecipeOf(R);
7477       // For min/max reductions, where we have a pair of icmp/select, we also
7478       // need to record the ICmp recipe, so it can be removed later.
7479       if (Kind == RecurrenceDescriptor::RK_IntegerMinMax ||
7480           Kind == RecurrenceDescriptor::RK_FloatMinMax) {
7481         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
7482       }
7483     }
7484   }
7485 
7486   // For each interleave group which is relevant for this (possibly trimmed)
7487   // Range, add it to the set of groups to be later applied to the VPlan and add
7488   // placeholders for its members' Recipes which we'll be replacing with a
7489   // single VPInterleaveRecipe.
7490   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7491     auto applyIG = [IG, this](unsigned VF) -> bool {
7492       return (VF >= 2 && // Query is illegal for VF == 1
7493               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7494                   LoopVectorizationCostModel::CM_Interleave);
7495     };
7496     if (!getDecisionAndClampRange(applyIG, Range))
7497       continue;
7498     InterleaveGroups.insert(IG);
7499     for (unsigned i = 0; i < IG->getFactor(); i++)
7500       if (Instruction *Member = IG->getMember(i))
7501         RecipeBuilder.recordRecipeOf(Member);
7502   }
7503 
7504   // ---------------------------------------------------------------------------
7505   // Build initial VPlan: Scan the body of the loop in a topological order to
7506   // visit each basic block after having visited its predecessor basic blocks.
7507   // ---------------------------------------------------------------------------
7508 
7509   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7510   auto Plan = std::make_unique<VPlan>();
7511   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7512   Plan->setEntry(VPBB);
7513 
7514   // Represent values that will have defs inside VPlan.
7515   for (Value *V : NeedDef)
7516     Plan->addVPValue(V);
7517 
7518   // Scan the body of the loop in a topological order to visit each basic block
7519   // after having visited its predecessor basic blocks.
7520   LoopBlocksDFS DFS(OrigLoop);
7521   DFS.perform(LI);
7522 
7523   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7524     // Relevant instructions from basic block BB will be grouped into VPRecipe
7525     // ingredients and fill a new VPBasicBlock.
7526     unsigned VPBBsForBB = 0;
7527     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7528     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7529     VPBB = FirstVPBBForBB;
7530     Builder.setInsertPoint(VPBB);
7531 
7532     // Introduce each ingredient into VPlan.
7533     // TODO: Model and preserve debug intrinsics in VPlan.
7534     for (Instruction &I : BB->instructionsWithoutDebug()) {
7535       Instruction *Instr = &I;
7536 
7537       // First filter out irrelevant instructions, to ensure no recipes are
7538       // built for them.
7539 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 7540 continue; 7541 7542 if (auto Recipe = 7543 RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) { 7544 RecipeBuilder.setRecipe(Instr, Recipe); 7545 VPBB->appendRecipe(Recipe); 7546 continue; 7547 } 7548 7549 // Otherwise, if all widening options failed, Instruction is to be 7550 // replicated. This may create a successor for VPBB. 7551 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication( 7552 Instr, Range, VPBB, PredInst2Recipe, Plan); 7553 if (NextVPBB != VPBB) { 7554 VPBB = NextVPBB; 7555 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 7556 : ""); 7557 } 7558 } 7559 } 7560 7561 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 7562 // may also be empty, such as the last one VPBB, reflecting original 7563 // basic-blocks with no recipes. 7564 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 7565 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 7566 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 7567 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 7568 delete PreEntry; 7569 7570 // --------------------------------------------------------------------------- 7571 // Transform initial VPlan: Apply previously taken decisions, in order, to 7572 // bring the VPlan to its final state. 7573 // --------------------------------------------------------------------------- 7574 7575 // Apply Sink-After legal constraints. 7576 for (auto &Entry : SinkAfter) { 7577 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 7578 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 7579 Sink->moveAfter(Target); 7580 } 7581 7582 // Interleave memory: for each Interleave Group we marked earlier as relevant 7583 // for this VPlan, replace the Recipes widening its memory instructions with a 7584 // single VPInterleaveRecipe at its insertion point. 7585 for (auto IG : InterleaveGroups) { 7586 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 7587 RecipeBuilder.getRecipe(IG->getInsertPos())); 7588 (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask())) 7589 ->insertBefore(Recipe); 7590 7591 for (unsigned i = 0; i < IG->getFactor(); ++i) 7592 if (Instruction *Member = IG->getMember(i)) { 7593 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 7594 } 7595 } 7596 7597 // Adjust the recipes for any inloop reductions. 7598 if (Range.Start > 1) 7599 adjustRecipesForInLoopReductions(Plan, RecipeBuilder); 7600 7601 // Finally, if tail is folded by masking, introduce selects between the phi 7602 // and the live-out instruction of each reduction, at the end of the latch. 
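  // The select takes the updated (live-out) value on active lanes and keeps the
  // phi's previous value on masked-off lanes, so the final horizontal reduction
  // only accumulates lanes that correspond to real iterations.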
7603   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
7604     Builder.setInsertPoint(VPBB);
7605     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
7606     for (auto &Reduction : Legal->getReductionVars()) {
7607       assert(!CM.isInLoopReduction(Reduction.first) &&
7608              "Didn't expect inloop tail folded reduction yet!");
7609       VPValue *Phi = Plan->getVPValue(Reduction.first);
7610       VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
7611       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
7612     }
7613   }
7614 
7615   std::string PlanName;
7616   raw_string_ostream RSO(PlanName);
7617   unsigned VF = Range.Start;
7618   Plan->addVF(VF);
7619   RSO << "Initial VPlan for VF={" << VF;
7620   for (VF *= 2; VF < Range.End; VF *= 2) {
7621     Plan->addVF(VF);
7622     RSO << "," << VF;
7623   }
7624   RSO << "},UF>=1";
7625   RSO.flush();
7626   Plan->setName(PlanName);
7627 
7628   return Plan;
7629 }
7630 
7631 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
7632   // Outer loop handling: They may require CFG and instruction level
7633   // transformations before even evaluating whether vectorization is profitable.
7634   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7635   // the vectorization pipeline.
7636   assert(!OrigLoop->empty());
7637   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7638 
7639   // Create new empty VPlan
7640   auto Plan = std::make_unique<VPlan>();
7641 
7642   // Build hierarchical CFG
7643   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
7644   HCFGBuilder.buildHierarchicalCFG();
7645 
7646   for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
7647     Plan->addVF(VF);
7648 
7649   if (EnableVPlanPredication) {
7650     VPlanPredicator VPP(*Plan);
7651     VPP.predicate();
7652 
7653     // Avoid running transformation to recipes until masked code generation in
7654     // VPlan-native path is in place.
7655     return Plan;
7656   }
7657 
7658   SmallPtrSet<Instruction *, 1> DeadInstructions;
7659   VPlanTransforms::VPInstructionsToVPRecipes(
7660       OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
7661   return Plan;
7662 }
7663 
7664 // Adjust the recipes for any inloop reductions. The chain of instructions
7665 // leading from the loop exit instr to the phi needs to be converted to
7666 // reductions, with one operand being vector and the other being the scalar
7667 // reduction chain.
7668 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
7669     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder) {
7670   for (auto &Reduction : CM.getInLoopReductionChains()) {
7671     PHINode *Phi = Reduction.first;
7672     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
7673     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
7674 
7675     // ReductionOperations are ordered top-down from the phi's use to the
7676     // LoopExitValue. We keep track of the previous item (the Chain) to tell
7677     // which of the two operands will remain scalar and which will be reduced.
7678     // For minmax the chain will be the select instructions.
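    // For example (illustrative only), for an in-loop reduction of
    // 'sum += a[i] * b[i]' the chain is phi -> add: the add keeps the value
    // coming from the phi as its scalar chain operand, while the mul result
    // becomes the vector operand that gets reduced.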
7679 Instruction *Chain = Phi; 7680 for (Instruction *R : ReductionOperations) { 7681 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 7682 RecurrenceDescriptor::RecurrenceKind Kind = RdxDesc.getRecurrenceKind(); 7683 7684 VPValue *ChainOp = Plan->getVPValue(Chain); 7685 unsigned FirstOpId; 7686 if (Kind == RecurrenceDescriptor::RK_IntegerMinMax || 7687 Kind == RecurrenceDescriptor::RK_FloatMinMax) { 7688 assert(WidenRecipe->getVPRecipeID() == VPRecipeBase::VPWidenSelectSC && 7689 "Expected to replace a VPWidenSelectSC"); 7690 FirstOpId = 1; 7691 } else { 7692 assert(WidenRecipe->getVPRecipeID() == VPRecipeBase::VPWidenSC && 7693 "Expected to replace a VPWidenSC"); 7694 FirstOpId = 0; 7695 } 7696 unsigned VecOpId = 7697 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 7698 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 7699 7700 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 7701 &RdxDesc, R, ChainOp, VecOp, Legal->hasFunNoNaNAttr(), TTI); 7702 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 7703 WidenRecipe->eraseFromParent(); 7704 7705 if (Kind == RecurrenceDescriptor::RK_IntegerMinMax || 7706 Kind == RecurrenceDescriptor::RK_FloatMinMax) { 7707 VPRecipeBase *CompareRecipe = 7708 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 7709 assert(CompareRecipe->getVPRecipeID() == VPRecipeBase::VPWidenSC && 7710 "Expected to replace a VPWidenSC"); 7711 CompareRecipe->eraseFromParent(); 7712 } 7713 Chain = R; 7714 } 7715 } 7716 } 7717 7718 Value* LoopVectorizationPlanner::VPCallbackILV:: 7719 getOrCreateVectorValues(Value *V, unsigned Part) { 7720 return ILV.getOrCreateVectorValue(V, Part); 7721 } 7722 7723 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue( 7724 Value *V, const VPIteration &Instance) { 7725 return ILV.getOrCreateScalarValue(V, Instance); 7726 } 7727 7728 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 7729 VPSlotTracker &SlotTracker) const { 7730 O << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 7731 IG->getInsertPos()->printAsOperand(O, false); 7732 O << ", "; 7733 getAddr()->printAsOperand(O, SlotTracker); 7734 VPValue *Mask = getMask(); 7735 if (Mask) { 7736 O << ", "; 7737 Mask->printAsOperand(O, SlotTracker); 7738 } 7739 for (unsigned i = 0; i < IG->getFactor(); ++i) 7740 if (Instruction *I = IG->getMember(i)) 7741 O << "\\l\" +\n" << Indent << "\" " << VPlanIngredient(I) << " " << i; 7742 } 7743 7744 void VPWidenCallRecipe::execute(VPTransformState &State) { 7745 State.ILV->widenCallInstruction(Ingredient, User, State); 7746 } 7747 7748 void VPWidenSelectRecipe::execute(VPTransformState &State) { 7749 State.ILV->widenSelectInstruction(Ingredient, User, InvariantCond, State); 7750 } 7751 7752 void VPWidenRecipe::execute(VPTransformState &State) { 7753 State.ILV->widenInstruction(Ingredient, User, State); 7754 } 7755 7756 void VPWidenGEPRecipe::execute(VPTransformState &State) { 7757 State.ILV->widenGEP(GEP, User, State.UF, State.VF, IsPtrLoopInvariant, 7758 IsIndexLoopInvariant, State); 7759 } 7760 7761 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 7762 assert(!State.Instance && "Int or FP induction being replicated."); 7763 State.ILV->widenIntOrFpInduction(IV, Trunc); 7764 } 7765 7766 void VPWidenPHIRecipe::execute(VPTransformState &State) { 7767 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 7768 } 7769 7770 void VPBlendRecipe::execute(VPTransformState &State) { 7771 
State.ILV->setDebugLocFromInst(State.Builder, Phi); 7772 // We know that all PHIs in non-header blocks are converted into 7773 // selects, so we don't have to worry about the insertion order and we 7774 // can just use the builder. 7775 // At this point we generate the predication tree. There may be 7776 // duplications since this is a simple recursive scan, but future 7777 // optimizations will clean it up. 7778 7779 unsigned NumIncoming = getNumIncomingValues(); 7780 7781 // Generate a sequence of selects of the form: 7782 // SELECT(Mask3, In3, 7783 // SELECT(Mask2, In2, 7784 // SELECT(Mask1, In1, 7785 // In0))) 7786 // Note that Mask0 is never used: lanes for which no path reaches this phi and 7787 // are essentially undef are taken from In0. 7788 InnerLoopVectorizer::VectorParts Entry(State.UF); 7789 for (unsigned In = 0; In < NumIncoming; ++In) { 7790 for (unsigned Part = 0; Part < State.UF; ++Part) { 7791 // We might have single edge PHIs (blocks) - use an identity 7792 // 'select' for the first PHI operand. 7793 Value *In0 = State.get(getIncomingValue(In), Part); 7794 if (In == 0) 7795 Entry[Part] = In0; // Initialize with the first incoming value. 7796 else { 7797 // Select between the current value and the previous incoming edge 7798 // based on the incoming mask. 7799 Value *Cond = State.get(getMask(In), Part); 7800 Entry[Part] = 7801 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 7802 } 7803 } 7804 } 7805 for (unsigned Part = 0; Part < State.UF; ++Part) 7806 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 7807 } 7808 7809 void VPInterleaveRecipe::execute(VPTransformState &State) { 7810 assert(!State.Instance && "Interleave group being replicated."); 7811 State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask()); 7812 } 7813 7814 void VPReductionRecipe::execute(VPTransformState &State) { 7815 assert(!State.Instance && "Reduction being replicated."); 7816 for (unsigned Part = 0; Part < State.UF; ++Part) { 7817 unsigned Kind = RdxDesc->getRecurrenceKind(); 7818 Value *NewVecOp = State.get(VecOp, Part); 7819 Value *NewRed = 7820 createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp, NoNaN); 7821 Value *PrevInChain = State.get(ChainOp, Part); 7822 Value *NextInChain; 7823 if (Kind == RecurrenceDescriptor::RK_IntegerMinMax || 7824 Kind == RecurrenceDescriptor::RK_FloatMinMax) { 7825 NextInChain = 7826 createMinMaxOp(State.Builder, RdxDesc->getMinMaxRecurrenceKind(), 7827 NewRed, PrevInChain); 7828 } else { 7829 NextInChain = State.Builder.CreateBinOp( 7830 (Instruction::BinaryOps)I->getOpcode(), NewRed, PrevInChain); 7831 } 7832 State.ValueMap.setVectorValue(I, Part, NextInChain); 7833 } 7834 } 7835 7836 void VPReplicateRecipe::execute(VPTransformState &State) { 7837 if (State.Instance) { // Generate a single instance. 7838 State.ILV->scalarizeInstruction(Ingredient, User, *State.Instance, 7839 IsPredicated, State); 7840 // Insert scalar instance packing it into a vector. 7841 if (AlsoPack && State.VF > 1) { 7842 // If we're constructing lane 0, initialize to start from undef. 
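      // (Each scalarized lane is then inserted into this vector value by the
      // packScalarIntoVectorValue call below.)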
7843 if (State.Instance->Lane == 0) { 7844 Value *Undef = UndefValue::get( 7845 FixedVectorType::get(Ingredient->getType(), State.VF)); 7846 State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef); 7847 } 7848 State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance); 7849 } 7850 return; 7851 } 7852 7853 // Generate scalar instances for all VF lanes of all UF parts, unless the 7854 // instruction is uniform inwhich case generate only the first lane for each 7855 // of the UF parts. 7856 unsigned EndLane = IsUniform ? 1 : State.VF; 7857 for (unsigned Part = 0; Part < State.UF; ++Part) 7858 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 7859 State.ILV->scalarizeInstruction(Ingredient, User, {Part, Lane}, 7860 IsPredicated, State); 7861 } 7862 7863 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 7864 assert(State.Instance && "Branch on Mask works only on single instance."); 7865 7866 unsigned Part = State.Instance->Part; 7867 unsigned Lane = State.Instance->Lane; 7868 7869 Value *ConditionBit = nullptr; 7870 VPValue *BlockInMask = getMask(); 7871 if (BlockInMask) { 7872 ConditionBit = State.get(BlockInMask, Part); 7873 if (ConditionBit->getType()->isVectorTy()) 7874 ConditionBit = State.Builder.CreateExtractElement( 7875 ConditionBit, State.Builder.getInt32(Lane)); 7876 } else // Block in mask is all-one. 7877 ConditionBit = State.Builder.getTrue(); 7878 7879 // Replace the temporary unreachable terminator with a new conditional branch, 7880 // whose two destinations will be set later when they are created. 7881 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 7882 assert(isa<UnreachableInst>(CurrentTerminator) && 7883 "Expected to replace unreachable terminator with conditional branch."); 7884 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 7885 CondBr->setSuccessor(0, nullptr); 7886 ReplaceInstWithInst(CurrentTerminator, CondBr); 7887 } 7888 7889 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 7890 assert(State.Instance && "Predicated instruction PHI works per instance."); 7891 Instruction *ScalarPredInst = cast<Instruction>( 7892 State.ValueMap.getScalarValue(PredInst, *State.Instance)); 7893 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 7894 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 7895 assert(PredicatingBB && "Predicated block has no single predecessor."); 7896 7897 // By current pack/unpack logic we need to generate only a single phi node: if 7898 // a vector value for the predicated instruction exists at this point it means 7899 // the instruction has vector users only, and a phi for the vector value is 7900 // needed. In this case the recipe of the predicated instruction is marked to 7901 // also do that packing, thereby "hoisting" the insert-element sequence. 7902 // Otherwise, a phi node for the scalar value is needed. 7903 unsigned Part = State.Instance->Part; 7904 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 7905 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 7906 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 7907 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 7908 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 7909 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 7910 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 
7911 } else { 7912 Type *PredInstType = PredInst->getType(); 7913 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 7914 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 7915 Phi->addIncoming(ScalarPredInst, PredicatedBB); 7916 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 7917 } 7918 } 7919 7920 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 7921 VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr; 7922 State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue, 7923 getMask()); 7924 } 7925 7926 // Determine how to lower the scalar epilogue, which depends on 1) optimising 7927 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 7928 // predication, and 4) a TTI hook that analyses whether the loop is suitable 7929 // for predication. 7930 static ScalarEpilogueLowering getScalarEpilogueLowering( 7931 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 7932 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 7933 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 7934 LoopVectorizationLegality &LVL) { 7935 // 1) OptSize takes precedence over all other options, i.e. if this is set, 7936 // don't look at hints or options, and don't request a scalar epilogue. 7937 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 7938 // LoopAccessInfo (due to code dependency and not being able to reliably get 7939 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 7940 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 7941 // versioning when the vectorization is forced, unlike hasOptSize. So revert 7942 // back to the old way and vectorize with versioning when forced. See D81345.) 7943 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 7944 PGSOQueryType::IRPass) && 7945 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 7946 return CM_ScalarEpilogueNotAllowedOptSize; 7947 7948 bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() && 7949 !PreferPredicateOverEpilog; 7950 7951 // 2) Next, if disabling predication is requested on the command line, honour 7952 // this and request a scalar epilogue. 7953 if (PredicateOptDisabled) 7954 return CM_ScalarEpilogueAllowed; 7955 7956 // 3) and 4) look if enabling predication is requested on the command line, 7957 // with a loop hint, or if the TTI hook indicates this is profitable, request 7958 // predication . 7959 if (PreferPredicateOverEpilog || 7960 Hints.getPredicate() == LoopVectorizeHints::FK_Enabled || 7961 (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 7962 LVL.getLAI()) && 7963 Hints.getPredicate() != LoopVectorizeHints::FK_Disabled)) 7964 return CM_ScalarEpilogueNotNeededUsePredicate; 7965 7966 return CM_ScalarEpilogueAllowed; 7967 } 7968 7969 // Process the loop in the VPlan-native vectorization path. This path builds 7970 // VPlan upfront in the vectorization pipeline, which allows to apply 7971 // VPlan-to-VPlan transformations from the very beginning without modifying the 7972 // input LLVM IR. 
7973 static bool processLoopInVPlanNativePath( 7974 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 7975 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 7976 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 7977 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 7978 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 7979 7980 if (PSE.getBackedgeTakenCount() == PSE.getSE()->getCouldNotCompute()) { 7981 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 7982 return false; 7983 } 7984 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 7985 Function *F = L->getHeader()->getParent(); 7986 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 7987 7988 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 7989 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 7990 7991 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 7992 &Hints, IAI); 7993 // Use the planner for outer loop vectorization. 7994 // TODO: CM is not used at this point inside the planner. Turn CM into an 7995 // optional argument if we don't need it in the future. 7996 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE); 7997 7998 // Get user vectorization factor. 7999 const unsigned UserVF = Hints.getWidth(); 8000 8001 // Plan how to best vectorize, return the best VF and its cost. 8002 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 8003 8004 // If we are stress testing VPlan builds, do not attempt to generate vector 8005 // code. Masked vector code generation support will follow soon. 8006 // Also, do not attempt to vectorize if no vector code will be produced. 8007 if (VPlanBuildStressTest || EnableVPlanPredication || 8008 VectorizationFactor::Disabled() == VF) 8009 return false; 8010 8011 LVP.setBestPlan(VF.Width, 1); 8012 8013 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 8014 &CM, BFI, PSI); 8015 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 8016 << L->getHeader()->getParent()->getName() << "\"\n"); 8017 LVP.executePlan(LB, DT); 8018 8019 // Mark the loop as already vectorized to avoid vectorizing again. 8020 Hints.setAlreadyVectorized(); 8021 8022 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 8023 return true; 8024 } 8025 8026 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 8027 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 8028 !EnableLoopInterleaving), 8029 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 8030 !EnableLoopVectorization) {} 8031 8032 bool LoopVectorizePass::processLoop(Loop *L) { 8033 assert((EnableVPlanNativePath || L->empty()) && 8034 "VPlan-native path is not enabled. Only process inner loops."); 8035 8036 #ifndef NDEBUG 8037 const std::string DebugLocStr = getDebugLocString(L); 8038 #endif /* NDEBUG */ 8039 8040 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 8041 << L->getHeader()->getParent()->getName() << "\" from " 8042 << DebugLocStr << "\n"); 8043 8044 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 8045 8046 LLVM_DEBUG( 8047 dbgs() << "LV: Loop hints:" 8048 << " force=" 8049 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 8050 ? "disabled" 8051 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 8052 ? 
"enabled" 8053 : "?")) 8054 << " width=" << Hints.getWidth() 8055 << " unroll=" << Hints.getInterleave() << "\n"); 8056 8057 // Function containing loop 8058 Function *F = L->getHeader()->getParent(); 8059 8060 // Looking at the diagnostic output is the only way to determine if a loop 8061 // was vectorized (other than looking at the IR or machine code), so it 8062 // is important to generate an optimization remark for each loop. Most of 8063 // these messages are generated as OptimizationRemarkAnalysis. Remarks 8064 // generated as OptimizationRemark and OptimizationRemarkMissed are 8065 // less verbose reporting vectorized loops and unvectorized loops that may 8066 // benefit from vectorization, respectively. 8067 8068 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 8069 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 8070 return false; 8071 } 8072 8073 PredicatedScalarEvolution PSE(*SE, *L); 8074 8075 // Check if it is legal to vectorize the loop. 8076 LoopVectorizationRequirements Requirements(*ORE); 8077 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 8078 &Requirements, &Hints, DB, AC, BFI, PSI); 8079 if (!LVL.canVectorize(EnableVPlanNativePath)) { 8080 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 8081 Hints.emitRemarkWithHints(); 8082 return false; 8083 } 8084 8085 // Check the function attributes and profiles to find out if this function 8086 // should be optimized for size. 8087 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 8088 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 8089 8090 // Entrance to the VPlan-native vectorization path. Outer loops are processed 8091 // here. They may require CFG and instruction level transformations before 8092 // even evaluating whether vectorization is profitable. Since we cannot modify 8093 // the incoming IR, we need to build VPlan upfront in the vectorization 8094 // pipeline. 8095 if (!L->empty()) 8096 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 8097 ORE, BFI, PSI, Hints); 8098 8099 assert(L->empty() && "Inner loop expected."); 8100 8101 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 8102 // count by optimizing for size, to minimize overheads. 8103 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 8104 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 8105 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 8106 << "This loop is worth vectorizing only if no scalar " 8107 << "iteration overheads are incurred."); 8108 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 8109 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 8110 else { 8111 LLVM_DEBUG(dbgs() << "\n"); 8112 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 8113 } 8114 } 8115 8116 // Check the function attributes to see if implicit floats are allowed. 8117 // FIXME: This check doesn't seem possibly correct -- what if the loop is 8118 // an integer loop and the vector instructions selected are purely integer 8119 // vector instructions? 8120 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 8121 reportVectorizationFailure( 8122 "Can't vectorize when the NoImplicitFloat attribute is used", 8123 "loop not vectorized due to NoImplicitFloat attribute", 8124 "NoImplicitFloat", ORE, L); 8125 Hints.emitRemarkWithHints(); 8126 return false; 8127 } 8128 8129 // Check if the target supports potentially unsafe FP vectorization. 
8130 // FIXME: Add a check for the type of safety issue (denormal, signaling) 8131 // for the target we're vectorizing for, to make sure none of the 8132 // additional fp-math flags can help. 8133 if (Hints.isPotentiallyUnsafe() && 8134 TTI->isFPVectorizationPotentiallyUnsafe()) { 8135 reportVectorizationFailure( 8136 "Potentially unsafe FP op prevents vectorization", 8137 "loop not vectorized due to unsafe FP support.", 8138 "UnsafeFP", ORE, L); 8139 Hints.emitRemarkWithHints(); 8140 return false; 8141 } 8142 8143 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 8144 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 8145 8146 // If an override option has been passed in for interleaved accesses, use it. 8147 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 8148 UseInterleaved = EnableInterleavedMemAccesses; 8149 8150 // Analyze interleaved memory accesses. 8151 if (UseInterleaved) { 8152 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 8153 } 8154 8155 // Use the cost model. 8156 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 8157 F, &Hints, IAI); 8158 CM.collectValuesToIgnore(); 8159 8160 // Use the planner for vectorization. 8161 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE); 8162 8163 // Get user vectorization factor and interleave count. 8164 unsigned UserVF = Hints.getWidth(); 8165 unsigned UserIC = Hints.getInterleave(); 8166 8167 // Plan how to best vectorize, return the best VF and its cost. 8168 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 8169 8170 VectorizationFactor VF = VectorizationFactor::Disabled(); 8171 unsigned IC = 1; 8172 8173 if (MaybeVF) { 8174 VF = *MaybeVF; 8175 // Select the interleave count. 8176 IC = CM.selectInterleaveCount(VF.Width, VF.Cost); 8177 } 8178 8179 // Identify the diagnostic messages that should be produced. 8180 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 8181 bool VectorizeLoop = true, InterleaveLoop = true; 8182 if (Requirements.doesNotMeet(F, L, Hints)) { 8183 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " 8184 "requirements.\n"); 8185 Hints.emitRemarkWithHints(); 8186 return false; 8187 } 8188 8189 if (VF.Width == 1) { 8190 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 8191 VecDiagMsg = std::make_pair( 8192 "VectorizationNotBeneficial", 8193 "the cost-model indicates that vectorization is not beneficial"); 8194 VectorizeLoop = false; 8195 } 8196 8197 if (!MaybeVF && UserIC > 1) { 8198 // Tell the user interleaving was avoided up-front, despite being explicitly 8199 // requested. 8200 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 8201 "interleaving should be avoided up front\n"); 8202 IntDiagMsg = std::make_pair( 8203 "InterleavingAvoided", 8204 "Ignoring UserIC, because interleaving was avoided up front"); 8205 InterleaveLoop = false; 8206 } else if (IC == 1 && UserIC <= 1) { 8207 // Tell the user interleaving is not beneficial. 
8208 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 8209 IntDiagMsg = std::make_pair( 8210 "InterleavingNotBeneficial", 8211 "the cost-model indicates that interleaving is not beneficial"); 8212 InterleaveLoop = false; 8213 if (UserIC == 1) { 8214 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 8215 IntDiagMsg.second += 8216 " and is explicitly disabled or interleave count is set to 1"; 8217 } 8218 } else if (IC > 1 && UserIC == 1) { 8219 // Tell the user interleaving is beneficial, but it explicitly disabled. 8220 LLVM_DEBUG( 8221 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 8222 IntDiagMsg = std::make_pair( 8223 "InterleavingBeneficialButDisabled", 8224 "the cost-model indicates that interleaving is beneficial " 8225 "but is explicitly disabled or interleave count is set to 1"); 8226 InterleaveLoop = false; 8227 } 8228 8229 // Override IC if user provided an interleave count. 8230 IC = UserIC > 0 ? UserIC : IC; 8231 8232 // Emit diagnostic messages, if any. 8233 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 8234 if (!VectorizeLoop && !InterleaveLoop) { 8235 // Do not vectorize or interleaving the loop. 8236 ORE->emit([&]() { 8237 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 8238 L->getStartLoc(), L->getHeader()) 8239 << VecDiagMsg.second; 8240 }); 8241 ORE->emit([&]() { 8242 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 8243 L->getStartLoc(), L->getHeader()) 8244 << IntDiagMsg.second; 8245 }); 8246 return false; 8247 } else if (!VectorizeLoop && InterleaveLoop) { 8248 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 8249 ORE->emit([&]() { 8250 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 8251 L->getStartLoc(), L->getHeader()) 8252 << VecDiagMsg.second; 8253 }); 8254 } else if (VectorizeLoop && !InterleaveLoop) { 8255 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 8256 << ") in " << DebugLocStr << '\n'); 8257 ORE->emit([&]() { 8258 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 8259 L->getStartLoc(), L->getHeader()) 8260 << IntDiagMsg.second; 8261 }); 8262 } else if (VectorizeLoop && InterleaveLoop) { 8263 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 8264 << ") in " << DebugLocStr << '\n'); 8265 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 8266 } 8267 8268 LVP.setBestPlan(VF.Width, IC); 8269 8270 using namespace ore; 8271 bool DisableRuntimeUnroll = false; 8272 MDNode *OrigLoopID = L->getLoopID(); 8273 8274 if (!VectorizeLoop) { 8275 assert(IC > 1 && "interleave count should not be 1 or 0"); 8276 // If we decided that it is not legal to vectorize the loop, then 8277 // interleave it. 8278 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM, 8279 BFI, PSI); 8280 LVP.executePlan(Unroller, DT); 8281 8282 ORE->emit([&]() { 8283 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 8284 L->getHeader()) 8285 << "interleaved loop (interleaved count: " 8286 << NV("InterleaveCount", IC) << ")"; 8287 }); 8288 } else { 8289 // If we decided that it is *legal* to vectorize the loop, then do it. 8290 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 8291 &LVL, &CM, BFI, PSI); 8292 LVP.executePlan(LB, DT); 8293 ++LoopsVectorized; 8294 8295 // Add metadata to disable runtime unrolling a scalar loop when there are 8296 // no runtime checks about strides and memory. A scalar loop that is 8297 // rarely used is not worth unrolling. 
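    // (Conversely, when runtime checks were added the scalar loop may still
    // execute the full trip count whenever those checks fail, so it remains
    // eligible for runtime unrolling in that case.)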
  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, &CM,
                               BFI, PSI);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM, BFI, PSI);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}
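// Illustrative note on the follow-up metadata handling at the end of
// processLoop above: when the original loop carries follow-up hints such as
//   !{!"llvm.loop.vectorize.followup_all", ...}
//   !{!"llvm.loop.vectorize.followup_epilogue", ...}
// makeFollowupLoopID propagates them to the scalar remainder loop; otherwise
// the remainder loop is tagged with llvm.loop.isvectorized (via
// Hints.setAlreadyVectorized()) and, when no runtime checks were emitted,
// with llvm.loop.unroll.runtime.disable. The metadata names are quoted from
// memory of the transform-metadata docs; verify the exact spellings before
// relying on them.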
LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Report whether anything in the function changed, and whether the CFG was
  // modified.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}
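// Illustrative usage note: as a new-pass-manager function pass, this pass is
// typically scheduled either from C++, e.g.
//   FunctionPassManager FPM;
//   FPM.addPass(LoopVectorizePass());
// or from the opt tool with a pipeline string along the lines of
//   opt -passes='loop-vectorize' input.ll -S
// The pipeline spelling is sketched here for orientation; see the pass
// registration in PassRegistry.def for the authoritative name and options.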