1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops 10 // and generates target-independent LLVM-IR. 11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs 12 // of instructions in order to estimate the profitability of vectorization. 13 // 14 // The loop vectorizer combines consecutive loop iterations into a single 15 // 'wide' iteration. After this transformation the index is incremented 16 // by the SIMD vector width, and not by one. 17 // 18 // This pass has three parts: 19 // 1. The main loop pass that drives the different parts. 20 // 2. LoopVectorizationLegality - A unit that checks for the legality 21 // of the vectorization. 22 // 3. InnerLoopVectorizer - A unit that performs the actual 23 // widening of instructions. 24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability 25 // of vectorization. It decides on the optimal vector width, which 26 // can be one, if vectorization is not profitable. 27 // 28 // There is a development effort going on to migrate loop vectorizer to the 29 // VPlan infrastructure and to introduce outer loop vectorization support (see 30 // docs/Proposal/VectorizationPlan.rst and 31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this 32 // purpose, we temporarily introduced the VPlan-native vectorization path: an 33 // alternative vectorization path that is natively implemented on top of the 34 // VPlan infrastructure. See EnableVPlanNativePath for enabling. 35 // 36 //===----------------------------------------------------------------------===// 37 // 38 // The reduction-variable vectorization is based on the paper: 39 // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. 40 // 41 // Variable uniformity checks are inspired by: 42 // Karrenberg, R. and Hack, S. Whole Function Vectorization. 43 // 44 // The interleaved access vectorization is based on the paper: 45 // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved 46 // Data for SIMD 47 // 48 // Other ideas/concepts are from: 49 // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. 50 // 51 // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of 52 // Vectorizing Compilers. 53 // 54 //===----------------------------------------------------------------------===// 55 56 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 57 #include "LoopVectorizationPlanner.h" 58 #include "VPRecipeBuilder.h" 59 #include "VPlanHCFGBuilder.h" 60 #include "VPlanHCFGTransforms.h" 61 #include "VPlanPredicator.h" 62 #include "llvm/ADT/APInt.h" 63 #include "llvm/ADT/ArrayRef.h" 64 #include "llvm/ADT/DenseMap.h" 65 #include "llvm/ADT/DenseMapInfo.h" 66 #include "llvm/ADT/Hashing.h" 67 #include "llvm/ADT/MapVector.h" 68 #include "llvm/ADT/None.h" 69 #include "llvm/ADT/Optional.h" 70 #include "llvm/ADT/STLExtras.h" 71 #include "llvm/ADT/SetVector.h" 72 #include "llvm/ADT/SmallPtrSet.h" 73 #include "llvm/ADT/SmallVector.h" 74 #include "llvm/ADT/Statistic.h" 75 #include "llvm/ADT/StringRef.h" 76 #include "llvm/ADT/Twine.h" 77 #include "llvm/ADT/iterator_range.h" 78 #include "llvm/Analysis/AssumptionCache.h" 79 #include "llvm/Analysis/BasicAliasAnalysis.h" 80 #include "llvm/Analysis/BlockFrequencyInfo.h" 81 #include "llvm/Analysis/CFG.h" 82 #include "llvm/Analysis/CodeMetrics.h" 83 #include "llvm/Analysis/DemandedBits.h" 84 #include "llvm/Analysis/GlobalsModRef.h" 85 #include "llvm/Analysis/LoopAccessAnalysis.h" 86 #include "llvm/Analysis/LoopAnalysisManager.h" 87 #include "llvm/Analysis/LoopInfo.h" 88 #include "llvm/Analysis/LoopIterator.h" 89 #include "llvm/Analysis/MemorySSA.h" 90 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 91 #include "llvm/Analysis/ScalarEvolution.h" 92 #include "llvm/Analysis/ScalarEvolutionExpander.h" 93 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 94 #include "llvm/Analysis/TargetLibraryInfo.h" 95 #include "llvm/Analysis/TargetTransformInfo.h" 96 #include "llvm/Analysis/VectorUtils.h" 97 #include "llvm/IR/Attributes.h" 98 #include "llvm/IR/BasicBlock.h" 99 #include "llvm/IR/CFG.h" 100 #include "llvm/IR/Constant.h" 101 #include "llvm/IR/Constants.h" 102 #include "llvm/IR/DataLayout.h" 103 #include "llvm/IR/DebugInfoMetadata.h" 104 #include "llvm/IR/DebugLoc.h" 105 #include "llvm/IR/DerivedTypes.h" 106 #include "llvm/IR/DiagnosticInfo.h" 107 #include "llvm/IR/Dominators.h" 108 #include "llvm/IR/Function.h" 109 #include "llvm/IR/IRBuilder.h" 110 #include "llvm/IR/InstrTypes.h" 111 #include "llvm/IR/Instruction.h" 112 #include "llvm/IR/Instructions.h" 113 #include "llvm/IR/IntrinsicInst.h" 114 #include "llvm/IR/Intrinsics.h" 115 #include "llvm/IR/LLVMContext.h" 116 #include "llvm/IR/Metadata.h" 117 #include "llvm/IR/Module.h" 118 #include "llvm/IR/Operator.h" 119 #include "llvm/IR/Type.h" 120 #include "llvm/IR/Use.h" 121 #include "llvm/IR/User.h" 122 #include "llvm/IR/Value.h" 123 #include "llvm/IR/ValueHandle.h" 124 #include "llvm/IR/Verifier.h" 125 #include "llvm/Pass.h" 126 #include "llvm/Support/Casting.h" 127 #include "llvm/Support/CommandLine.h" 128 #include "llvm/Support/Compiler.h" 129 #include "llvm/Support/Debug.h" 130 #include "llvm/Support/ErrorHandling.h" 131 #include "llvm/Support/MathExtras.h" 132 #include "llvm/Support/raw_ostream.h" 133 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 134 #include "llvm/Transforms/Utils/LoopSimplify.h" 135 #include "llvm/Transforms/Utils/LoopUtils.h" 136 #include "llvm/Transforms/Utils/LoopVersioning.h" 137 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 138 #include <algorithm> 139 #include <cassert> 140 #include <cstdint> 141 #include <cstdlib> 142 #include <functional> 143 #include <iterator> 144 #include <limits> 145 #include <memory> 146 #include <string> 147 #include <tuple> 148 #include <utility> 149 #include <vector> 150 151 using namespace llvm; 152 153 #define LV_NAME "loop-vectorize" 154 #define DEBUG_TYPE LV_NAME 155 156 /// @{ 157 /// Metadata attribute names 158 static const char *const LLVMLoopVectorizeFollowupAll = 159 "llvm.loop.vectorize.followup_all"; 160 static const char *const LLVMLoopVectorizeFollowupVectorized = 161 "llvm.loop.vectorize.followup_vectorized"; 162 static const char *const LLVMLoopVectorizeFollowupEpilogue = 163 "llvm.loop.vectorize.followup_epilogue"; 164 /// @} 165 166 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 167 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 168 169 /// Loops with a known constant trip count below this number are vectorized only 170 /// if no scalar iteration overheads are incurred. 171 static cl::opt<unsigned> TinyTripCountVectorThreshold( 172 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 173 cl::desc("Loops with a constant trip count that is smaller than this " 174 "value are vectorized only if no scalar iteration overheads " 175 "are incurred.")); 176 177 static cl::opt<bool> MaximizeBandwidth( 178 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 179 cl::desc("Maximize bandwidth when selecting vectorization factor which " 180 "will be determined by the smallest type in loop.")); 181 182 static cl::opt<bool> EnableInterleavedMemAccesses( 183 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 184 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 185 186 /// An interleave-group may need masking if it resides in a block that needs 187 /// predication, or in order to mask away gaps. 188 static cl::opt<bool> EnableMaskedInterleavedMemAccesses( 189 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, 190 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); 191 192 /// We don't interleave loops with a known constant trip count below this 193 /// number. 194 static const unsigned TinyTripCountInterleaveThreshold = 128; 195 196 static cl::opt<unsigned> ForceTargetNumScalarRegs( 197 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 198 cl::desc("A flag that overrides the target's number of scalar registers.")); 199 200 static cl::opt<unsigned> ForceTargetNumVectorRegs( 201 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 202 cl::desc("A flag that overrides the target's number of vector registers.")); 203 204 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 205 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 206 cl::desc("A flag that overrides the target's max interleave factor for " 207 "scalar loops.")); 208 209 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 210 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 211 cl::desc("A flag that overrides the target's max interleave factor for " 212 "vectorized loops.")); 213 214 static cl::opt<unsigned> ForceTargetInstructionCost( 215 "force-target-instruction-cost", cl::init(0), cl::Hidden, 216 cl::desc("A flag that overrides the target's expected cost for " 217 "an instruction to a single constant value. Mostly " 218 "useful for getting consistent testing.")); 219 220 static cl::opt<unsigned> SmallLoopCost( 221 "small-loop-cost", cl::init(20), cl::Hidden, 222 cl::desc( 223 "The cost of a loop that is considered 'small' by the interleaver.")); 224 225 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 226 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 227 cl::desc("Enable the use of the block frequency analysis to access PGO " 228 "heuristics minimizing code growth in cold regions and being more " 229 "aggressive in hot regions.")); 230 231 // Runtime interleave loops for load/store throughput. 232 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 233 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 234 cl::desc( 235 "Enable runtime interleaving until load/store ports are saturated")); 236 237 /// The number of stores in a loop that are allowed to need predication. 238 static cl::opt<unsigned> NumberOfStoresToPredicate( 239 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 240 cl::desc("Max number of stores to be predicated behind an if.")); 241 242 static cl::opt<bool> EnableIndVarRegisterHeur( 243 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 244 cl::desc("Count the induction variable only once when interleaving")); 245 246 static cl::opt<bool> EnableCondStoresVectorization( 247 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 248 cl::desc("Enable if predication of stores during vectorization.")); 249 250 static cl::opt<unsigned> MaxNestedScalarReductionIC( 251 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 252 cl::desc("The maximum interleave count to use when interleaving a scalar " 253 "reduction in a nested loop.")); 254 255 cl::opt<bool> EnableVPlanNativePath( 256 "enable-vplan-native-path", cl::init(false), cl::Hidden, 257 cl::desc("Enable VPlan-native vectorization path with " 258 "support for outer loop vectorization.")); 259 260 // FIXME: Remove this switch once we have divergence analysis. Currently we 261 // assume divergent non-backedge branches when this switch is true. 262 cl::opt<bool> EnableVPlanPredication( 263 "enable-vplan-predication", cl::init(false), cl::Hidden, 264 cl::desc("Enable VPlan-native vectorization path predicator with " 265 "support for outer loop vectorization.")); 266 267 // This flag enables the stress testing of the VPlan H-CFG construction in the 268 // VPlan-native vectorization path. It must be used in conjuction with 269 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the 270 // verification of the H-CFGs built. 271 static cl::opt<bool> VPlanBuildStressTest( 272 "vplan-build-stress-test", cl::init(false), cl::Hidden, 273 cl::desc( 274 "Build VPlan for every supported loop nest in the function and bail " 275 "out right after the build (stress test the VPlan H-CFG construction " 276 "in the VPlan-native vectorization path).")); 277 278 /// A helper function for converting Scalar types to vector types. 279 /// If the incoming type is void, we return void. If the VF is 1, we return 280 /// the scalar type. 281 static Type *ToVectorTy(Type *Scalar, unsigned VF) { 282 if (Scalar->isVoidTy() || VF == 1) 283 return Scalar; 284 return VectorType::get(Scalar, VF); 285 } 286 287 /// A helper function that returns the type of loaded or stored value. 288 static Type *getMemInstValueType(Value *I) { 289 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 290 "Expected Load or Store instruction"); 291 if (auto *LI = dyn_cast<LoadInst>(I)) 292 return LI->getType(); 293 return cast<StoreInst>(I)->getValueOperand()->getType(); 294 } 295 296 /// A helper function that returns true if the given type is irregular. The 297 /// type is irregular if its allocated size doesn't equal the store size of an 298 /// element of the corresponding vector type at the given vectorization factor. 299 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) { 300 // Determine if an array of VF elements of type Ty is "bitcast compatible" 301 // with a <VF x Ty> vector. 302 if (VF > 1) { 303 auto *VectorTy = VectorType::get(Ty, VF); 304 return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy); 305 } 306 307 // If the vectorization factor is one, we just check if an array of type Ty 308 // requires padding between elements. 309 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); 310 } 311 312 /// A helper function that returns the reciprocal of the block probability of 313 /// predicated blocks. If we return X, we are assuming the predicated block 314 /// will execute once for every X iterations of the loop header. 315 /// 316 /// TODO: We should use actual block probability here, if available. Currently, 317 /// we always assume predicated blocks have a 50% chance of executing. 318 static unsigned getReciprocalPredBlockProb() { return 2; } 319 320 /// A helper function that adds a 'fast' flag to floating-point operations. 321 static Value *addFastMathFlag(Value *V) { 322 if (isa<FPMathOperator>(V)) 323 cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast()); 324 return V; 325 } 326 327 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) { 328 if (isa<FPMathOperator>(V)) 329 cast<Instruction>(V)->setFastMathFlags(FMF); 330 return V; 331 } 332 333 /// A helper function that returns an integer or floating-point constant with 334 /// value C. 335 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { 336 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) 337 : ConstantFP::get(Ty, C); 338 } 339 340 namespace llvm { 341 342 /// InnerLoopVectorizer vectorizes loops which contain only one basic 343 /// block to a specified vectorization factor (VF). 344 /// This class performs the widening of scalars into vectors, or multiple 345 /// scalars. This class also implements the following features: 346 /// * It inserts an epilogue loop for handling loops that don't have iteration 347 /// counts that are known to be a multiple of the vectorization factor. 348 /// * It handles the code generation for reduction variables. 349 /// * Scalarization (implementation using scalars) of un-vectorizable 350 /// instructions. 351 /// InnerLoopVectorizer does not perform any vectorization-legality 352 /// checks, and relies on the caller to check for the different legality 353 /// aspects. The InnerLoopVectorizer relies on the 354 /// LoopVectorizationLegality class to provide information about the induction 355 /// and reduction variables that were found to a given vectorization factor. 356 class InnerLoopVectorizer { 357 public: 358 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 359 LoopInfo *LI, DominatorTree *DT, 360 const TargetLibraryInfo *TLI, 361 const TargetTransformInfo *TTI, AssumptionCache *AC, 362 OptimizationRemarkEmitter *ORE, unsigned VecWidth, 363 unsigned UnrollFactor, LoopVectorizationLegality *LVL, 364 LoopVectorizationCostModel *CM) 365 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), 366 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), 367 Builder(PSE.getSE()->getContext()), 368 VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {} 369 virtual ~InnerLoopVectorizer() = default; 370 371 /// Create a new empty loop. Unlink the old loop and connect the new one. 372 /// Return the pre-header block of the new loop. 373 BasicBlock *createVectorizedLoopSkeleton(); 374 375 /// Widen a single instruction within the innermost loop. 376 void widenInstruction(Instruction &I); 377 378 /// Fix the vectorized code, taking care of header phi's, live-outs, and more. 379 void fixVectorizedLoop(); 380 381 // Return true if any runtime check is added. 382 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 383 384 /// A type for vectorized values in the new loop. Each value from the 385 /// original loop, when vectorized, is represented by UF vector values in the 386 /// new unrolled loop, where UF is the unroll factor. 387 using VectorParts = SmallVector<Value *, 2>; 388 389 /// Vectorize a single PHINode in a block. This method handles the induction 390 /// variable canonicalization. It supports both VF = 1 for unrolled loops and 391 /// arbitrary length vectors. 392 void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF); 393 394 /// A helper function to scalarize a single Instruction in the innermost loop. 395 /// Generates a sequence of scalar instances for each lane between \p MinLane 396 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, 397 /// inclusive.. 398 void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance, 399 bool IfPredicateInstr); 400 401 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc 402 /// is provided, the integer induction variable will first be truncated to 403 /// the corresponding type. 404 void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr); 405 406 /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a 407 /// vector or scalar value on-demand if one is not yet available. When 408 /// vectorizing a loop, we visit the definition of an instruction before its 409 /// uses. When visiting the definition, we either vectorize or scalarize the 410 /// instruction, creating an entry for it in the corresponding map. (In some 411 /// cases, such as induction variables, we will create both vector and scalar 412 /// entries.) Then, as we encounter uses of the definition, we derive values 413 /// for each scalar or vector use unless such a value is already available. 414 /// For example, if we scalarize a definition and one of its uses is vector, 415 /// we build the required vector on-demand with an insertelement sequence 416 /// when visiting the use. Otherwise, if the use is scalar, we can use the 417 /// existing scalar definition. 418 /// 419 /// Return a value in the new loop corresponding to \p V from the original 420 /// loop at unroll index \p Part. If the value has already been vectorized, 421 /// the corresponding vector entry in VectorLoopValueMap is returned. If, 422 /// however, the value has a scalar entry in VectorLoopValueMap, we construct 423 /// a new vector value on-demand by inserting the scalar values into a vector 424 /// with an insertelement sequence. If the value has been neither vectorized 425 /// nor scalarized, it must be loop invariant, so we simply broadcast the 426 /// value into a vector. 427 Value *getOrCreateVectorValue(Value *V, unsigned Part); 428 429 /// Return a value in the new loop corresponding to \p V from the original 430 /// loop at unroll and vector indices \p Instance. If the value has been 431 /// vectorized but not scalarized, the necessary extractelement instruction 432 /// will be generated. 433 Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance); 434 435 /// Construct the vector value of a scalarized value \p V one lane at a time. 436 void packScalarIntoVectorValue(Value *V, const VPIteration &Instance); 437 438 /// Try to vectorize the interleaved access group that \p Instr belongs to, 439 /// optionally masking the vector operations if \p BlockInMask is non-null. 440 void vectorizeInterleaveGroup(Instruction *Instr, 441 VectorParts *BlockInMask = nullptr); 442 443 /// Vectorize Load and Store instructions, optionally masking the vector 444 /// operations if \p BlockInMask is non-null. 445 void vectorizeMemoryInstruction(Instruction *Instr, 446 VectorParts *BlockInMask = nullptr); 447 448 /// Set the debug location in the builder using the debug location in 449 /// the instruction. 450 void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr); 451 452 /// Fix the non-induction PHIs in the OrigPHIsToFix vector. 453 void fixNonInductionPHIs(void); 454 455 protected: 456 friend class LoopVectorizationPlanner; 457 458 /// A small list of PHINodes. 459 using PhiVector = SmallVector<PHINode *, 4>; 460 461 /// A type for scalarized values in the new loop. Each value from the 462 /// original loop, when scalarized, is represented by UF x VF scalar values 463 /// in the new unrolled loop, where UF is the unroll factor and VF is the 464 /// vectorization factor. 465 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 466 467 /// Set up the values of the IVs correctly when exiting the vector loop. 468 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 469 Value *CountRoundDown, Value *EndValue, 470 BasicBlock *MiddleBlock); 471 472 /// Create a new induction variable inside L. 473 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 474 Value *Step, Instruction *DL); 475 476 /// Handle all cross-iteration phis in the header. 477 void fixCrossIterationPHIs(); 478 479 /// Fix a first-order recurrence. This is the second phase of vectorizing 480 /// this phi node. 481 void fixFirstOrderRecurrence(PHINode *Phi); 482 483 /// Fix a reduction cross-iteration phi. This is the second phase of 484 /// vectorizing this phi node. 485 void fixReduction(PHINode *Phi); 486 487 /// The Loop exit block may have single value PHI nodes with some 488 /// incoming value. While vectorizing we only handled real values 489 /// that were defined inside the loop and we should have one value for 490 /// each predecessor of its parent basic block. See PR14725. 491 void fixLCSSAPHIs(); 492 493 /// Iteratively sink the scalarized operands of a predicated instruction into 494 /// the block that was created for it. 495 void sinkScalarOperands(Instruction *PredInst); 496 497 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 498 /// represented as. 499 void truncateToMinimalBitwidths(); 500 501 /// Insert the new loop to the loop hierarchy and pass manager 502 /// and update the analysis passes. 503 void updateAnalysis(); 504 505 /// Create a broadcast instruction. This method generates a broadcast 506 /// instruction (shuffle) for loop invariant values and for the induction 507 /// value. If this is the induction variable then we extend it to N, N+1, ... 508 /// this is needed because each iteration in the loop corresponds to a SIMD 509 /// element. 510 virtual Value *getBroadcastInstrs(Value *V); 511 512 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...) 513 /// to each vector element of Val. The sequence starts at StartIndex. 514 /// \p Opcode is relevant for FP induction variable. 515 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, 516 Instruction::BinaryOps Opcode = 517 Instruction::BinaryOpsEnd); 518 519 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 520 /// variable on which to base the steps, \p Step is the size of the step, and 521 /// \p EntryVal is the value from the original loop that maps to the steps. 522 /// Note that \p EntryVal doesn't have to be an induction variable - it 523 /// can also be a truncate instruction. 524 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, 525 const InductionDescriptor &ID); 526 527 /// Create a vector induction phi node based on an existing scalar one. \p 528 /// EntryVal is the value from the original loop that maps to the vector phi 529 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a 530 /// truncate instruction, instead of widening the original IV, we widen a 531 /// version of the IV truncated to \p EntryVal's type. 532 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, 533 Value *Step, Instruction *EntryVal); 534 535 /// Returns true if an instruction \p I should be scalarized instead of 536 /// vectorized for the chosen vectorization factor. 537 bool shouldScalarizeInstruction(Instruction *I) const; 538 539 /// Returns true if we should generate a scalar version of \p IV. 540 bool needsScalarInduction(Instruction *IV) const; 541 542 /// If there is a cast involved in the induction variable \p ID, which should 543 /// be ignored in the vectorized loop body, this function records the 544 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the 545 /// cast. We had already proved that the casted Phi is equal to the uncasted 546 /// Phi in the vectorized loop (under a runtime guard), and therefore 547 /// there is no need to vectorize the cast - the same value can be used in the 548 /// vector loop for both the Phi and the cast. 549 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified, 550 /// Otherwise, \p VectorLoopValue is a widened/vectorized value. 551 /// 552 /// \p EntryVal is the value from the original loop that maps to the vector 553 /// phi node and is used to distinguish what is the IV currently being 554 /// processed - original one (if \p EntryVal is a phi corresponding to the 555 /// original IV) or the "newly-created" one based on the proof mentioned above 556 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 557 /// latter case \p EntryVal is a TruncInst and we must not record anything for 558 /// that IV, but it's error-prone to expect callers of this routine to care 559 /// about that, hence this explicit parameter. 560 void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID, 561 const Instruction *EntryVal, 562 Value *VectorLoopValue, 563 unsigned Part, 564 unsigned Lane = UINT_MAX); 565 566 /// Generate a shuffle sequence that will reverse the vector Vec. 567 virtual Value *reverseVector(Value *Vec); 568 569 /// Returns (and creates if needed) the original loop trip count. 570 Value *getOrCreateTripCount(Loop *NewLoop); 571 572 /// Returns (and creates if needed) the trip count of the widened loop. 573 Value *getOrCreateVectorTripCount(Loop *NewLoop); 574 575 /// Returns a bitcasted value to the requested vector type. 576 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 577 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 578 const DataLayout &DL); 579 580 /// Emit a bypass check to see if the vector trip count is zero, including if 581 /// it overflows. 582 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 583 584 /// Emit a bypass check to see if all of the SCEV assumptions we've 585 /// had to make are correct. 586 void emitSCEVChecks(Loop *L, BasicBlock *Bypass); 587 588 /// Emit bypass checks to check any memory assumptions we may have made. 589 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 590 591 /// Compute the transformed value of Index at offset StartValue using step 592 /// StepValue. 593 /// For integer induction, returns StartValue + Index * StepValue. 594 /// For pointer induction, returns StartValue[Index * StepValue]. 595 /// FIXME: The newly created binary instructions should contain nsw/nuw 596 /// flags, which can be found from the original scalar operations. 597 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, 598 const DataLayout &DL, 599 const InductionDescriptor &ID) const; 600 601 /// Add additional metadata to \p To that was not present on \p Orig. 602 /// 603 /// Currently this is used to add the noalias annotations based on the 604 /// inserted memchecks. Use this for instructions that are *cloned* into the 605 /// vector loop. 606 void addNewMetadata(Instruction *To, const Instruction *Orig); 607 608 /// Add metadata from one instruction to another. 609 /// 610 /// This includes both the original MDs from \p From and additional ones (\see 611 /// addNewMetadata). Use this for *newly created* instructions in the vector 612 /// loop. 613 void addMetadata(Instruction *To, Instruction *From); 614 615 /// Similar to the previous function but it adds the metadata to a 616 /// vector of instructions. 617 void addMetadata(ArrayRef<Value *> To, Instruction *From); 618 619 /// The original loop. 620 Loop *OrigLoop; 621 622 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies 623 /// dynamic knowledge to simplify SCEV expressions and converts them to a 624 /// more usable form. 625 PredicatedScalarEvolution &PSE; 626 627 /// Loop Info. 628 LoopInfo *LI; 629 630 /// Dominator Tree. 631 DominatorTree *DT; 632 633 /// Alias Analysis. 634 AliasAnalysis *AA; 635 636 /// Target Library Info. 637 const TargetLibraryInfo *TLI; 638 639 /// Target Transform Info. 640 const TargetTransformInfo *TTI; 641 642 /// Assumption Cache. 643 AssumptionCache *AC; 644 645 /// Interface to emit optimization remarks. 646 OptimizationRemarkEmitter *ORE; 647 648 /// LoopVersioning. It's only set up (non-null) if memchecks were 649 /// used. 650 /// 651 /// This is currently only used to add no-alias metadata based on the 652 /// memchecks. The actually versioning is performed manually. 653 std::unique_ptr<LoopVersioning> LVer; 654 655 /// The vectorization SIMD factor to use. Each vector will have this many 656 /// vector elements. 657 unsigned VF; 658 659 /// The vectorization unroll factor to use. Each scalar is vectorized to this 660 /// many different vector instructions. 661 unsigned UF; 662 663 /// The builder that we use 664 IRBuilder<> Builder; 665 666 // --- Vectorization state --- 667 668 /// The vector-loop preheader. 669 BasicBlock *LoopVectorPreHeader; 670 671 /// The scalar-loop preheader. 672 BasicBlock *LoopScalarPreHeader; 673 674 /// Middle Block between the vector and the scalar. 675 BasicBlock *LoopMiddleBlock; 676 677 /// The ExitBlock of the scalar loop. 678 BasicBlock *LoopExitBlock; 679 680 /// The vector loop body. 681 BasicBlock *LoopVectorBody; 682 683 /// The scalar loop body. 684 BasicBlock *LoopScalarBody; 685 686 /// A list of all bypass blocks. The first block is the entry of the loop. 687 SmallVector<BasicBlock *, 4> LoopBypassBlocks; 688 689 /// The new Induction variable which was added to the new block. 690 PHINode *Induction = nullptr; 691 692 /// The induction variable of the old basic block. 693 PHINode *OldInduction = nullptr; 694 695 /// Maps values from the original loop to their corresponding values in the 696 /// vectorized loop. A key value can map to either vector values, scalar 697 /// values or both kinds of values, depending on whether the key was 698 /// vectorized and scalarized. 699 VectorizerValueMap VectorLoopValueMap; 700 701 /// Store instructions that were predicated. 702 SmallVector<Instruction *, 4> PredicatedInstructions; 703 704 /// Trip count of the original loop. 705 Value *TripCount = nullptr; 706 707 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) 708 Value *VectorTripCount = nullptr; 709 710 /// The legality analysis. 711 LoopVectorizationLegality *Legal; 712 713 /// The profitablity analysis. 714 LoopVectorizationCostModel *Cost; 715 716 // Record whether runtime checks are added. 717 bool AddedSafetyChecks = false; 718 719 // Holds the end values for each induction variable. We save the end values 720 // so we can later fix-up the external users of the induction variables. 721 DenseMap<PHINode *, Value *> IVEndValues; 722 723 // Vector of original scalar PHIs whose corresponding widened PHIs need to be 724 // fixed up at the end of vector code generation. 725 SmallVector<PHINode *, 8> OrigPHIsToFix; 726 }; 727 728 class InnerLoopUnroller : public InnerLoopVectorizer { 729 public: 730 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 731 LoopInfo *LI, DominatorTree *DT, 732 const TargetLibraryInfo *TLI, 733 const TargetTransformInfo *TTI, AssumptionCache *AC, 734 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, 735 LoopVectorizationLegality *LVL, 736 LoopVectorizationCostModel *CM) 737 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1, 738 UnrollFactor, LVL, CM) {} 739 740 private: 741 Value *getBroadcastInstrs(Value *V) override; 742 Value *getStepVector(Value *Val, int StartIdx, Value *Step, 743 Instruction::BinaryOps Opcode = 744 Instruction::BinaryOpsEnd) override; 745 Value *reverseVector(Value *Vec) override; 746 }; 747 748 } // end namespace llvm 749 750 /// Look for a meaningful debug location on the instruction or it's 751 /// operands. 752 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { 753 if (!I) 754 return I; 755 756 DebugLoc Empty; 757 if (I->getDebugLoc() != Empty) 758 return I; 759 760 for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) { 761 if (Instruction *OpInst = dyn_cast<Instruction>(*OI)) 762 if (OpInst->getDebugLoc() != Empty) 763 return OpInst; 764 } 765 766 return I; 767 } 768 769 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) { 770 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) { 771 const DILocation *DIL = Inst->getDebugLoc(); 772 if (DIL && Inst->getFunction()->isDebugInfoForProfiling() && 773 !isa<DbgInfoIntrinsic>(Inst)) { 774 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF); 775 if (NewDIL) 776 B.SetCurrentDebugLocation(NewDIL.getValue()); 777 else 778 LLVM_DEBUG(dbgs() 779 << "Failed to create new discriminator: " 780 << DIL->getFilename() << " Line: " << DIL->getLine()); 781 } 782 else 783 B.SetCurrentDebugLocation(DIL); 784 } else 785 B.SetCurrentDebugLocation(DebugLoc()); 786 } 787 788 #ifndef NDEBUG 789 /// \return string containing a file name and a line # for the given loop. 790 static std::string getDebugLocString(const Loop *L) { 791 std::string Result; 792 if (L) { 793 raw_string_ostream OS(Result); 794 if (const DebugLoc LoopDbgLoc = L->getStartLoc()) 795 LoopDbgLoc.print(OS); 796 else 797 // Just print the module name. 798 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); 799 OS.flush(); 800 } 801 return Result; 802 } 803 #endif 804 805 void InnerLoopVectorizer::addNewMetadata(Instruction *To, 806 const Instruction *Orig) { 807 // If the loop was versioned with memchecks, add the corresponding no-alias 808 // metadata. 809 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) 810 LVer->annotateInstWithNoAlias(To, Orig); 811 } 812 813 void InnerLoopVectorizer::addMetadata(Instruction *To, 814 Instruction *From) { 815 propagateMetadata(To, From); 816 addNewMetadata(To, From); 817 } 818 819 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, 820 Instruction *From) { 821 for (Value *V : To) { 822 if (Instruction *I = dyn_cast<Instruction>(V)) 823 addMetadata(I, From); 824 } 825 } 826 827 namespace llvm { 828 829 /// LoopVectorizationCostModel - estimates the expected speedups due to 830 /// vectorization. 831 /// In many cases vectorization is not profitable. This can happen because of 832 /// a number of reasons. In this class we mainly attempt to predict the 833 /// expected speedup/slowdowns due to the supported instruction set. We use the 834 /// TargetTransformInfo to query the different backends for the cost of 835 /// different operations. 836 class LoopVectorizationCostModel { 837 public: 838 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 839 LoopInfo *LI, LoopVectorizationLegality *Legal, 840 const TargetTransformInfo &TTI, 841 const TargetLibraryInfo *TLI, DemandedBits *DB, 842 AssumptionCache *AC, 843 OptimizationRemarkEmitter *ORE, const Function *F, 844 const LoopVectorizeHints *Hints, 845 InterleavedAccessInfo &IAI) 846 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 847 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {} 848 849 /// \return An upper bound for the vectorization factor, or None if 850 /// vectorization and interleaving should be avoided up front. 851 Optional<unsigned> computeMaxVF(bool OptForSize); 852 853 /// \return The most profitable vectorization factor and the cost of that VF. 854 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO 855 /// then this vectorization factor will be selected if vectorization is 856 /// possible. 857 VectorizationFactor selectVectorizationFactor(unsigned MaxVF); 858 859 /// Setup cost-based decisions for user vectorization factor. 860 void selectUserVectorizationFactor(unsigned UserVF) { 861 collectUniformsAndScalars(UserVF); 862 collectInstsToScalarize(UserVF); 863 } 864 865 /// \return The size (in bits) of the smallest and widest types in the code 866 /// that needs to be vectorized. We ignore values that remain scalar such as 867 /// 64 bit loop indices. 868 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 869 870 /// \return The desired interleave count. 871 /// If interleave count has been specified by metadata it will be returned. 872 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 873 /// are the selected vectorization factor and the cost of the selected VF. 874 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 875 unsigned LoopCost); 876 877 /// Memory access instruction may be vectorized in more than one way. 878 /// Form of instruction after vectorization depends on cost. 879 /// This function takes cost-based decisions for Load/Store instructions 880 /// and collects them in a map. This decisions map is used for building 881 /// the lists of loop-uniform and loop-scalar instructions. 882 /// The calculated cost is saved with widening decision in order to 883 /// avoid redundant calculations. 884 void setCostBasedWideningDecision(unsigned VF); 885 886 /// A struct that represents some properties of the register usage 887 /// of a loop. 888 struct RegisterUsage { 889 /// Holds the number of loop invariant values that are used in the loop. 890 unsigned LoopInvariantRegs; 891 892 /// Holds the maximum number of concurrent live intervals in the loop. 893 unsigned MaxLocalUsers; 894 }; 895 896 /// \return Returns information about the register usages of the loop for the 897 /// given vectorization factors. 898 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 899 900 /// Collect values we want to ignore in the cost model. 901 void collectValuesToIgnore(); 902 903 /// \returns The smallest bitwidth each instruction can be represented with. 904 /// The vector equivalents of these instructions should be truncated to this 905 /// type. 906 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 907 return MinBWs; 908 } 909 910 /// \returns True if it is more profitable to scalarize instruction \p I for 911 /// vectorization factor \p VF. 912 bool isProfitableToScalarize(Instruction *I, unsigned VF) const { 913 assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1."); 914 915 // Cost model is not run in the VPlan-native path - return conservative 916 // result until this changes. 917 if (EnableVPlanNativePath) 918 return false; 919 920 auto Scalars = InstsToScalarize.find(VF); 921 assert(Scalars != InstsToScalarize.end() && 922 "VF not yet analyzed for scalarization profitability"); 923 return Scalars->second.find(I) != Scalars->second.end(); 924 } 925 926 /// Returns true if \p I is known to be uniform after vectorization. 927 bool isUniformAfterVectorization(Instruction *I, unsigned VF) const { 928 if (VF == 1) 929 return true; 930 931 // Cost model is not run in the VPlan-native path - return conservative 932 // result until this changes. 933 if (EnableVPlanNativePath) 934 return false; 935 936 auto UniformsPerVF = Uniforms.find(VF); 937 assert(UniformsPerVF != Uniforms.end() && 938 "VF not yet analyzed for uniformity"); 939 return UniformsPerVF->second.find(I) != UniformsPerVF->second.end(); 940 } 941 942 /// Returns true if \p I is known to be scalar after vectorization. 943 bool isScalarAfterVectorization(Instruction *I, unsigned VF) const { 944 if (VF == 1) 945 return true; 946 947 // Cost model is not run in the VPlan-native path - return conservative 948 // result until this changes. 949 if (EnableVPlanNativePath) 950 return false; 951 952 auto ScalarsPerVF = Scalars.find(VF); 953 assert(ScalarsPerVF != Scalars.end() && 954 "Scalar values are not calculated for VF"); 955 return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end(); 956 } 957 958 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 959 /// for vectorization factor \p VF. 960 bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const { 961 return VF > 1 && MinBWs.find(I) != MinBWs.end() && 962 !isProfitableToScalarize(I, VF) && 963 !isScalarAfterVectorization(I, VF); 964 } 965 966 /// Decision that was taken during cost calculation for memory instruction. 967 enum InstWidening { 968 CM_Unknown, 969 CM_Widen, // For consecutive accesses with stride +1. 970 CM_Widen_Reverse, // For consecutive accesses with stride -1. 971 CM_Interleave, 972 CM_GatherScatter, 973 CM_Scalarize 974 }; 975 976 /// Save vectorization decision \p W and \p Cost taken by the cost model for 977 /// instruction \p I and vector width \p VF. 978 void setWideningDecision(Instruction *I, unsigned VF, InstWidening W, 979 unsigned Cost) { 980 assert(VF >= 2 && "Expected VF >=2"); 981 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 982 } 983 984 /// Save vectorization decision \p W and \p Cost taken by the cost model for 985 /// interleaving group \p Grp and vector width \p VF. 986 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF, 987 InstWidening W, unsigned Cost) { 988 assert(VF >= 2 && "Expected VF >=2"); 989 /// Broadcast this decicion to all instructions inside the group. 990 /// But the cost will be assigned to one instruction only. 991 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 992 if (auto *I = Grp->getMember(i)) { 993 if (Grp->getInsertPos() == I) 994 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 995 else 996 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 997 } 998 } 999 } 1000 1001 /// Return the cost model decision for the given instruction \p I and vector 1002 /// width \p VF. Return CM_Unknown if this instruction did not pass 1003 /// through the cost modeling. 1004 InstWidening getWideningDecision(Instruction *I, unsigned VF) { 1005 assert(VF >= 2 && "Expected VF >=2"); 1006 1007 // Cost model is not run in the VPlan-native path - return conservative 1008 // result until this changes. 1009 if (EnableVPlanNativePath) 1010 return CM_GatherScatter; 1011 1012 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1013 auto Itr = WideningDecisions.find(InstOnVF); 1014 if (Itr == WideningDecisions.end()) 1015 return CM_Unknown; 1016 return Itr->second.first; 1017 } 1018 1019 /// Return the vectorization cost for the given instruction \p I and vector 1020 /// width \p VF. 1021 unsigned getWideningCost(Instruction *I, unsigned VF) { 1022 assert(VF >= 2 && "Expected VF >=2"); 1023 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1024 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1025 "The cost is not calculated"); 1026 return WideningDecisions[InstOnVF].second; 1027 } 1028 1029 /// Return True if instruction \p I is an optimizable truncate whose operand 1030 /// is an induction variable. Such a truncate will be removed by adding a new 1031 /// induction variable with the destination type. 1032 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) { 1033 // If the instruction is not a truncate, return false. 1034 auto *Trunc = dyn_cast<TruncInst>(I); 1035 if (!Trunc) 1036 return false; 1037 1038 // Get the source and destination types of the truncate. 1039 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1040 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1041 1042 // If the truncate is free for the given types, return false. Replacing a 1043 // free truncate with an induction variable would add an induction variable 1044 // update instruction to each iteration of the loop. We exclude from this 1045 // check the primary induction variable since it will need an update 1046 // instruction regardless. 1047 Value *Op = Trunc->getOperand(0); 1048 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1049 return false; 1050 1051 // If the truncated value is not an induction variable, return false. 1052 return Legal->isInductionPhi(Op); 1053 } 1054 1055 /// Collects the instructions to scalarize for each predicated instruction in 1056 /// the loop. 1057 void collectInstsToScalarize(unsigned VF); 1058 1059 /// Collect Uniform and Scalar values for the given \p VF. 1060 /// The sets depend on CM decision for Load/Store instructions 1061 /// that may be vectorized as interleave, gather-scatter or scalarized. 1062 void collectUniformsAndScalars(unsigned VF) { 1063 // Do the analysis once. 1064 if (VF == 1 || Uniforms.find(VF) != Uniforms.end()) 1065 return; 1066 setCostBasedWideningDecision(VF); 1067 collectLoopUniforms(VF); 1068 collectLoopScalars(VF); 1069 } 1070 1071 /// Returns true if the target machine supports masked store operation 1072 /// for the given \p DataType and kind of access to \p Ptr. 1073 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1074 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType); 1075 } 1076 1077 /// Returns true if the target machine supports masked load operation 1078 /// for the given \p DataType and kind of access to \p Ptr. 1079 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1080 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType); 1081 } 1082 1083 /// Returns true if the target machine supports masked scatter operation 1084 /// for the given \p DataType. 1085 bool isLegalMaskedScatter(Type *DataType) { 1086 return TTI.isLegalMaskedScatter(DataType); 1087 } 1088 1089 /// Returns true if the target machine supports masked gather operation 1090 /// for the given \p DataType. 1091 bool isLegalMaskedGather(Type *DataType) { 1092 return TTI.isLegalMaskedGather(DataType); 1093 } 1094 1095 /// Returns true if the target machine can represent \p V as a masked gather 1096 /// or scatter operation. 1097 bool isLegalGatherOrScatter(Value *V) { 1098 bool LI = isa<LoadInst>(V); 1099 bool SI = isa<StoreInst>(V); 1100 if (!LI && !SI) 1101 return false; 1102 auto *Ty = getMemInstValueType(V); 1103 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1104 } 1105 1106 /// Returns true if \p I is an instruction that will be scalarized with 1107 /// predication. Such instructions include conditional stores and 1108 /// instructions that may divide by zero. 1109 /// If a non-zero VF has been calculated, we check if I will be scalarized 1110 /// predication for that VF. 1111 bool isScalarWithPredication(Instruction *I, unsigned VF = 1); 1112 1113 // Returns true if \p I is an instruction that will be predicated either 1114 // through scalar predication or masked load/store or masked gather/scatter. 1115 // Superset of instructions that return true for isScalarWithPredication. 1116 bool isPredicatedInst(Instruction *I) { 1117 if (!blockNeedsPredication(I->getParent())) 1118 return false; 1119 // Loads and stores that need some form of masked operation are predicated 1120 // instructions. 1121 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1122 return Legal->isMaskRequired(I); 1123 return isScalarWithPredication(I); 1124 } 1125 1126 /// Returns true if \p I is a memory instruction with consecutive memory 1127 /// access that can be widened. 1128 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1); 1129 1130 /// Returns true if \p I is a memory instruction in an interleaved-group 1131 /// of memory accesses that can be vectorized with wide vector loads/stores 1132 /// and shuffles. 1133 bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1); 1134 1135 /// Check if \p Instr belongs to any interleaved access group. 1136 bool isAccessInterleaved(Instruction *Instr) { 1137 return InterleaveInfo.isInterleaved(Instr); 1138 } 1139 1140 /// Get the interleaved access group that \p Instr belongs to. 1141 const InterleaveGroup<Instruction> * 1142 getInterleavedAccessGroup(Instruction *Instr) { 1143 return InterleaveInfo.getInterleaveGroup(Instr); 1144 } 1145 1146 /// Returns true if an interleaved group requires a scalar iteration 1147 /// to handle accesses with gaps, and there is nothing preventing us from 1148 /// creating a scalar epilogue. 1149 bool requiresScalarEpilogue() const { 1150 return IsScalarEpilogueAllowed && InterleaveInfo.requiresScalarEpilogue(); 1151 } 1152 1153 /// Returns true if a scalar epilogue is not allowed due to optsize. 1154 bool isScalarEpilogueAllowed() const { return IsScalarEpilogueAllowed; } 1155 1156 /// Returns true if all loop blocks should be masked to fold tail loop. 1157 bool foldTailByMasking() const { return FoldTailByMasking; } 1158 1159 bool blockNeedsPredication(BasicBlock *BB) { 1160 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1161 } 1162 1163 private: 1164 unsigned NumPredStores = 0; 1165 1166 /// \return An upper bound for the vectorization factor, larger than zero. 1167 /// One is returned if vectorization should best be avoided due to cost. 1168 unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount); 1169 1170 /// The vectorization cost is a combination of the cost itself and a boolean 1171 /// indicating whether any of the contributing operations will actually 1172 /// operate on 1173 /// vector values after type legalization in the backend. If this latter value 1174 /// is 1175 /// false, then all operations will be scalarized (i.e. no vectorization has 1176 /// actually taken place). 1177 using VectorizationCostTy = std::pair<unsigned, bool>; 1178 1179 /// Returns the expected execution cost. The unit of the cost does 1180 /// not matter because we use the 'cost' units to compare different 1181 /// vector widths. The cost that is returned is *not* normalized by 1182 /// the factor width. 1183 VectorizationCostTy expectedCost(unsigned VF); 1184 1185 /// Returns the execution time cost of an instruction for a given vector 1186 /// width. Vector width of one means scalar. 1187 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1188 1189 /// The cost-computation logic from getInstructionCost which provides 1190 /// the vector type as an output parameter. 1191 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1192 1193 /// Calculate vectorization cost of memory instruction \p I. 1194 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF); 1195 1196 /// The cost computation for scalarized memory instruction. 1197 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF); 1198 1199 /// The cost computation for interleaving group of memory instructions. 1200 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF); 1201 1202 /// The cost computation for Gather/Scatter instruction. 1203 unsigned getGatherScatterCost(Instruction *I, unsigned VF); 1204 1205 /// The cost computation for widening instruction \p I with consecutive 1206 /// memory access. 1207 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF); 1208 1209 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1210 /// Load: scalar load + broadcast. 1211 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1212 /// element) 1213 unsigned getUniformMemOpCost(Instruction *I, unsigned VF); 1214 1215 /// Returns whether the instruction is a load or store and will be a emitted 1216 /// as a vector operation. 1217 bool isConsecutiveLoadOrStore(Instruction *I); 1218 1219 /// Returns true if an artificially high cost for emulated masked memrefs 1220 /// should be used. 1221 bool useEmulatedMaskMemRefHack(Instruction *I); 1222 1223 /// Create an analysis remark that explains why vectorization failed 1224 /// 1225 /// \p RemarkName is the identifier for the remark. \return the remark object 1226 /// that can be streamed to. 1227 OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) { 1228 return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(), 1229 RemarkName, TheLoop); 1230 } 1231 1232 /// Map of scalar integer values to the smallest bitwidth they can be legally 1233 /// represented as. The vector equivalents of these values should be truncated 1234 /// to this type. 1235 MapVector<Instruction *, uint64_t> MinBWs; 1236 1237 /// A type representing the costs for instructions if they were to be 1238 /// scalarized rather than vectorized. The entries are Instruction-Cost 1239 /// pairs. 1240 using ScalarCostsTy = DenseMap<Instruction *, unsigned>; 1241 1242 /// A set containing all BasicBlocks that are known to present after 1243 /// vectorization as a predicated block. 1244 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1245 1246 /// Records whether it is allowed to have the original scalar loop execute at 1247 /// least once. This may be needed as a fallback loop in case runtime 1248 /// aliasing/dependence checks fail, or to handle the tail/remainder 1249 /// iterations when the trip count is unknown or doesn't divide by the VF, 1250 /// or as a peel-loop to handle gaps in interleave-groups. 1251 /// Under optsize and when the trip count is very small we don't allow any 1252 /// iterations to execute in the scalar loop. 1253 bool IsScalarEpilogueAllowed = true; 1254 1255 /// All blocks of loop are to be masked to fold tail of scalar iterations. 1256 bool FoldTailByMasking = false; 1257 1258 /// A map holding scalar costs for different vectorization factors. The 1259 /// presence of a cost for an instruction in the mapping indicates that the 1260 /// instruction will be scalarized when vectorizing with the associated 1261 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1262 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize; 1263 1264 /// Holds the instructions known to be uniform after vectorization. 1265 /// The data is collected per VF. 1266 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms; 1267 1268 /// Holds the instructions known to be scalar after vectorization. 1269 /// The data is collected per VF. 1270 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1271 1272 /// Holds the instructions (address computations) that are forced to be 1273 /// scalarized. 1274 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1275 1276 /// Returns the expected difference in cost from scalarizing the expression 1277 /// feeding a predicated instruction \p PredInst. The instructions to 1278 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1279 /// non-negative return value implies the expression will be scalarized. 1280 /// Currently, only single-use chains are considered for scalarization. 1281 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1282 unsigned VF); 1283 1284 /// Collect the instructions that are uniform after vectorization. An 1285 /// instruction is uniform if we represent it with a single scalar value in 1286 /// the vectorized loop corresponding to each vector iteration. Examples of 1287 /// uniform instructions include pointer operands of consecutive or 1288 /// interleaved memory accesses. Note that although uniformity implies an 1289 /// instruction will be scalar, the reverse is not true. In general, a 1290 /// scalarized instruction will be represented by VF scalar values in the 1291 /// vectorized loop, each corresponding to an iteration of the original 1292 /// scalar loop. 1293 void collectLoopUniforms(unsigned VF); 1294 1295 /// Collect the instructions that are scalar after vectorization. An 1296 /// instruction is scalar if it is known to be uniform or will be scalarized 1297 /// during vectorization. Non-uniform scalarized instructions will be 1298 /// represented by VF values in the vectorized loop, each corresponding to an 1299 /// iteration of the original scalar loop. 1300 void collectLoopScalars(unsigned VF); 1301 1302 /// Keeps cost model vectorization decision and cost for instructions. 1303 /// Right now it is used for memory instructions only. 1304 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1305 std::pair<InstWidening, unsigned>>; 1306 1307 DecisionList WideningDecisions; 1308 1309 public: 1310 /// The loop that we evaluate. 1311 Loop *TheLoop; 1312 1313 /// Predicated scalar evolution analysis. 1314 PredicatedScalarEvolution &PSE; 1315 1316 /// Loop Info analysis. 1317 LoopInfo *LI; 1318 1319 /// Vectorization legality. 1320 LoopVectorizationLegality *Legal; 1321 1322 /// Vector target information. 1323 const TargetTransformInfo &TTI; 1324 1325 /// Target Library Info. 1326 const TargetLibraryInfo *TLI; 1327 1328 /// Demanded bits analysis. 1329 DemandedBits *DB; 1330 1331 /// Assumption cache. 1332 AssumptionCache *AC; 1333 1334 /// Interface to emit optimization remarks. 1335 OptimizationRemarkEmitter *ORE; 1336 1337 const Function *TheFunction; 1338 1339 /// Loop Vectorize Hint. 1340 const LoopVectorizeHints *Hints; 1341 1342 /// The interleave access information contains groups of interleaved accesses 1343 /// with the same stride and close to each other. 1344 InterleavedAccessInfo &InterleaveInfo; 1345 1346 /// Values to ignore in the cost model. 1347 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1348 1349 /// Values to ignore in the cost model when VF > 1. 1350 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1351 }; 1352 1353 } // end namespace llvm 1354 1355 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1356 // vectorization. The loop needs to be annotated with #pragma omp simd 1357 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1358 // vector length information is not provided, vectorization is not considered 1359 // explicit. Interleave hints are not allowed either. These limitations will be 1360 // relaxed in the future. 1361 // Please, note that we are currently forced to abuse the pragma 'clang 1362 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1363 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1364 // provides *explicit vectorization hints* (LV can bypass legal checks and 1365 // assume that vectorization is legal). However, both hints are implemented 1366 // using the same metadata (llvm.loop.vectorize, processed by 1367 // LoopVectorizeHints). This will be fixed in the future when the native IR 1368 // representation for pragma 'omp simd' is introduced. 1369 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1370 OptimizationRemarkEmitter *ORE) { 1371 assert(!OuterLp->empty() && "This is not an outer loop"); 1372 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1373 1374 // Only outer loops with an explicit vectorization hint are supported. 1375 // Unannotated outer loops are ignored. 1376 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1377 return false; 1378 1379 Function *Fn = OuterLp->getHeader()->getParent(); 1380 if (!Hints.allowVectorization(Fn, OuterLp, 1381 true /*VectorizeOnlyWhenForced*/)) { 1382 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1383 return false; 1384 } 1385 1386 if (Hints.getInterleave() > 1) { 1387 // TODO: Interleave support is future work. 1388 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1389 "outer loops.\n"); 1390 Hints.emitRemarkWithHints(); 1391 return false; 1392 } 1393 1394 return true; 1395 } 1396 1397 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1398 OptimizationRemarkEmitter *ORE, 1399 SmallVectorImpl<Loop *> &V) { 1400 // Collect inner loops and outer loops without irreducible control flow. For 1401 // now, only collect outer loops that have explicit vectorization hints. If we 1402 // are stress testing the VPlan H-CFG construction, we collect the outermost 1403 // loop of every loop nest. 1404 if (L.empty() || VPlanBuildStressTest || 1405 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1406 LoopBlocksRPO RPOT(&L); 1407 RPOT.perform(LI); 1408 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1409 V.push_back(&L); 1410 // TODO: Collect inner loops inside marked outer loops in case 1411 // vectorization fails for the outer loop. Do not invoke 1412 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1413 // already known to be reducible. We can use an inherited attribute for 1414 // that. 1415 return; 1416 } 1417 } 1418 for (Loop *InnerL : L) 1419 collectSupportedLoops(*InnerL, LI, ORE, V); 1420 } 1421 1422 namespace { 1423 1424 /// The LoopVectorize Pass. 1425 struct LoopVectorize : public FunctionPass { 1426 /// Pass identification, replacement for typeid 1427 static char ID; 1428 1429 LoopVectorizePass Impl; 1430 1431 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1432 bool VectorizeOnlyWhenForced = false) 1433 : FunctionPass(ID) { 1434 Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced; 1435 Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced; 1436 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1437 } 1438 1439 bool runOnFunction(Function &F) override { 1440 if (skipFunction(F)) 1441 return false; 1442 1443 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1444 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1445 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1446 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1447 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1448 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1449 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 1450 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1451 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1452 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1453 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1454 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1455 1456 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1457 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1458 1459 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1460 GetLAA, *ORE); 1461 } 1462 1463 void getAnalysisUsage(AnalysisUsage &AU) const override { 1464 AU.addRequired<AssumptionCacheTracker>(); 1465 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1466 AU.addRequired<DominatorTreeWrapperPass>(); 1467 AU.addRequired<LoopInfoWrapperPass>(); 1468 AU.addRequired<ScalarEvolutionWrapperPass>(); 1469 AU.addRequired<TargetTransformInfoWrapperPass>(); 1470 AU.addRequired<AAResultsWrapperPass>(); 1471 AU.addRequired<LoopAccessLegacyAnalysis>(); 1472 AU.addRequired<DemandedBitsWrapperPass>(); 1473 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1474 1475 // We currently do not preserve loopinfo/dominator analyses with outer loop 1476 // vectorization. Until this is addressed, mark these analyses as preserved 1477 // only for non-VPlan-native path. 1478 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 1479 if (!EnableVPlanNativePath) { 1480 AU.addPreserved<LoopInfoWrapperPass>(); 1481 AU.addPreserved<DominatorTreeWrapperPass>(); 1482 } 1483 1484 AU.addPreserved<BasicAAWrapperPass>(); 1485 AU.addPreserved<GlobalsAAWrapperPass>(); 1486 } 1487 }; 1488 1489 } // end anonymous namespace 1490 1491 //===----------------------------------------------------------------------===// 1492 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1493 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1494 //===----------------------------------------------------------------------===// 1495 1496 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1497 // We need to place the broadcast of invariant variables outside the loop, 1498 // but only if it's proven safe to do so. Else, broadcast will be inside 1499 // vector loop body. 1500 Instruction *Instr = dyn_cast<Instruction>(V); 1501 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1502 (!Instr || 1503 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1504 // Place the code for broadcasting invariant variables in the new preheader. 1505 IRBuilder<>::InsertPointGuard Guard(Builder); 1506 if (SafeToHoist) 1507 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1508 1509 // Broadcast the scalar into all locations in the vector. 1510 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1511 1512 return Shuf; 1513 } 1514 1515 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1516 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1517 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1518 "Expected either an induction phi-node or a truncate of it!"); 1519 Value *Start = II.getStartValue(); 1520 1521 // Construct the initial value of the vector IV in the vector loop preheader 1522 auto CurrIP = Builder.saveIP(); 1523 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1524 if (isa<TruncInst>(EntryVal)) { 1525 assert(Start->getType()->isIntegerTy() && 1526 "Truncation requires an integer type"); 1527 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1528 Step = Builder.CreateTrunc(Step, TruncType); 1529 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1530 } 1531 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1532 Value *SteppedStart = 1533 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1534 1535 // We create vector phi nodes for both integer and floating-point induction 1536 // variables. Here, we determine the kind of arithmetic we will perform. 1537 Instruction::BinaryOps AddOp; 1538 Instruction::BinaryOps MulOp; 1539 if (Step->getType()->isIntegerTy()) { 1540 AddOp = Instruction::Add; 1541 MulOp = Instruction::Mul; 1542 } else { 1543 AddOp = II.getInductionOpcode(); 1544 MulOp = Instruction::FMul; 1545 } 1546 1547 // Multiply the vectorization factor by the step using integer or 1548 // floating-point arithmetic as appropriate. 1549 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1550 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1551 1552 // Create a vector splat to use in the induction update. 1553 // 1554 // FIXME: If the step is non-constant, we create the vector splat with 1555 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1556 // handle a constant vector splat. 1557 Value *SplatVF = isa<Constant>(Mul) 1558 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 1559 : Builder.CreateVectorSplat(VF, Mul); 1560 Builder.restoreIP(CurrIP); 1561 1562 // We may need to add the step a number of times, depending on the unroll 1563 // factor. The last of those goes into the PHI. 1564 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1565 &*LoopVectorBody->getFirstInsertionPt()); 1566 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1567 Instruction *LastInduction = VecInd; 1568 for (unsigned Part = 0; Part < UF; ++Part) { 1569 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1570 1571 if (isa<TruncInst>(EntryVal)) 1572 addMetadata(LastInduction, EntryVal); 1573 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1574 1575 LastInduction = cast<Instruction>(addFastMathFlag( 1576 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1577 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1578 } 1579 1580 // Move the last step to the end of the latch block. This ensures consistent 1581 // placement of all induction updates. 1582 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1583 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1584 auto *ICmp = cast<Instruction>(Br->getCondition()); 1585 LastInduction->moveBefore(ICmp); 1586 LastInduction->setName("vec.ind.next"); 1587 1588 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1589 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1590 } 1591 1592 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1593 return Cost->isScalarAfterVectorization(I, VF) || 1594 Cost->isProfitableToScalarize(I, VF); 1595 } 1596 1597 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1598 if (shouldScalarizeInstruction(IV)) 1599 return true; 1600 auto isScalarInst = [&](User *U) -> bool { 1601 auto *I = cast<Instruction>(U); 1602 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1603 }; 1604 return llvm::any_of(IV->users(), isScalarInst); 1605 } 1606 1607 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1608 const InductionDescriptor &ID, const Instruction *EntryVal, 1609 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1610 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1611 "Expected either an induction phi-node or a truncate of it!"); 1612 1613 // This induction variable is not the phi from the original loop but the 1614 // newly-created IV based on the proof that casted Phi is equal to the 1615 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1616 // re-uses the same InductionDescriptor that original IV uses but we don't 1617 // have to do any recording in this case - that is done when original IV is 1618 // processed. 1619 if (isa<TruncInst>(EntryVal)) 1620 return; 1621 1622 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1623 if (Casts.empty()) 1624 return; 1625 // Only the first Cast instruction in the Casts vector is of interest. 1626 // The rest of the Casts (if exist) have no uses outside the 1627 // induction update chain itself. 1628 Instruction *CastInst = *Casts.begin(); 1629 if (Lane < UINT_MAX) 1630 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1631 else 1632 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1633 } 1634 1635 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1636 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1637 "Primary induction variable must have an integer type"); 1638 1639 auto II = Legal->getInductionVars()->find(IV); 1640 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1641 1642 auto ID = II->second; 1643 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1644 1645 // The scalar value to broadcast. This will be derived from the canonical 1646 // induction variable. 1647 Value *ScalarIV = nullptr; 1648 1649 // The value from the original loop to which we are mapping the new induction 1650 // variable. 1651 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1652 1653 // True if we have vectorized the induction variable. 1654 auto VectorizedIV = false; 1655 1656 // Determine if we want a scalar version of the induction variable. This is 1657 // true if the induction variable itself is not widened, or if it has at 1658 // least one user in the loop that is not widened. 1659 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 1660 1661 // Generate code for the induction step. Note that induction steps are 1662 // required to be loop-invariant 1663 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 1664 "Induction step should be loop invariant"); 1665 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1666 Value *Step = nullptr; 1667 if (PSE.getSE()->isSCEVable(IV->getType())) { 1668 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1669 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1670 LoopVectorPreHeader->getTerminator()); 1671 } else { 1672 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 1673 } 1674 1675 // Try to create a new independent vector induction variable. If we can't 1676 // create the phi node, we will splat the scalar induction variable in each 1677 // loop iteration. 1678 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 1679 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1680 VectorizedIV = true; 1681 } 1682 1683 // If we haven't yet vectorized the induction variable, or if we will create 1684 // a scalar one, we need to define the scalar induction variable and step 1685 // values. If we were given a truncation type, truncate the canonical 1686 // induction variable and step. Otherwise, derive these values from the 1687 // induction descriptor. 1688 if (!VectorizedIV || NeedsScalarIV) { 1689 ScalarIV = Induction; 1690 if (IV != OldInduction) { 1691 ScalarIV = IV->getType()->isIntegerTy() 1692 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1693 : Builder.CreateCast(Instruction::SIToFP, Induction, 1694 IV->getType()); 1695 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1696 ScalarIV->setName("offset.idx"); 1697 } 1698 if (Trunc) { 1699 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1700 assert(Step->getType()->isIntegerTy() && 1701 "Truncation requires an integer step"); 1702 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1703 Step = Builder.CreateTrunc(Step, TruncType); 1704 } 1705 } 1706 1707 // If we haven't yet vectorized the induction variable, splat the scalar 1708 // induction variable, and build the necessary step vectors. 1709 // TODO: Don't do it unless the vectorized IV is really required. 1710 if (!VectorizedIV) { 1711 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1712 for (unsigned Part = 0; Part < UF; ++Part) { 1713 Value *EntryPart = 1714 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1715 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1716 if (Trunc) 1717 addMetadata(EntryPart, Trunc); 1718 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1719 } 1720 } 1721 1722 // If an induction variable is only used for counting loop iterations or 1723 // calculating addresses, it doesn't need to be widened. Create scalar steps 1724 // that can be used by instructions we will later scalarize. Note that the 1725 // addition of the scalar steps will not increase the number of instructions 1726 // in the loop in the common case prior to InstCombine. We will be trading 1727 // one vector extract for each scalar step. 1728 if (NeedsScalarIV) 1729 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1730 } 1731 1732 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1733 Instruction::BinaryOps BinOp) { 1734 // Create and check the types. 1735 assert(Val->getType()->isVectorTy() && "Must be a vector"); 1736 int VLen = Val->getType()->getVectorNumElements(); 1737 1738 Type *STy = Val->getType()->getScalarType(); 1739 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1740 "Induction Step must be an integer or FP"); 1741 assert(Step->getType() == STy && "Step has wrong type"); 1742 1743 SmallVector<Constant *, 8> Indices; 1744 1745 if (STy->isIntegerTy()) { 1746 // Create a vector of consecutive numbers from zero to VF. 1747 for (int i = 0; i < VLen; ++i) 1748 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1749 1750 // Add the consecutive indices to the vector value. 1751 Constant *Cv = ConstantVector::get(Indices); 1752 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1753 Step = Builder.CreateVectorSplat(VLen, Step); 1754 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1755 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1756 // which can be found from the original scalar operations. 1757 Step = Builder.CreateMul(Cv, Step); 1758 return Builder.CreateAdd(Val, Step, "induction"); 1759 } 1760 1761 // Floating point induction. 1762 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1763 "Binary Opcode should be specified for FP induction"); 1764 // Create a vector of consecutive numbers from zero to VF. 1765 for (int i = 0; i < VLen; ++i) 1766 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1767 1768 // Add the consecutive indices to the vector value. 1769 Constant *Cv = ConstantVector::get(Indices); 1770 1771 Step = Builder.CreateVectorSplat(VLen, Step); 1772 1773 // Floating point operations had to be 'fast' to enable the induction. 1774 FastMathFlags Flags; 1775 Flags.setFast(); 1776 1777 Value *MulOp = Builder.CreateFMul(Cv, Step); 1778 if (isa<Instruction>(MulOp)) 1779 // Have to check, MulOp may be a constant 1780 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1781 1782 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1783 if (isa<Instruction>(BOp)) 1784 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1785 return BOp; 1786 } 1787 1788 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1789 Instruction *EntryVal, 1790 const InductionDescriptor &ID) { 1791 // We shouldn't have to build scalar steps if we aren't vectorizing. 1792 assert(VF > 1 && "VF should be greater than one"); 1793 1794 // Get the value type and ensure it and the step have the same integer type. 1795 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1796 assert(ScalarIVTy == Step->getType() && 1797 "Val and Step should have the same type"); 1798 1799 // We build scalar steps for both integer and floating-point induction 1800 // variables. Here, we determine the kind of arithmetic we will perform. 1801 Instruction::BinaryOps AddOp; 1802 Instruction::BinaryOps MulOp; 1803 if (ScalarIVTy->isIntegerTy()) { 1804 AddOp = Instruction::Add; 1805 MulOp = Instruction::Mul; 1806 } else { 1807 AddOp = ID.getInductionOpcode(); 1808 MulOp = Instruction::FMul; 1809 } 1810 1811 // Determine the number of scalars we need to generate for each unroll 1812 // iteration. If EntryVal is uniform, we only need to generate the first 1813 // lane. Otherwise, we generate all VF values. 1814 unsigned Lanes = 1815 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 1816 : VF; 1817 // Compute the scalar steps and save the results in VectorLoopValueMap. 1818 for (unsigned Part = 0; Part < UF; ++Part) { 1819 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 1820 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 1821 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 1822 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 1823 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 1824 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 1825 } 1826 } 1827 } 1828 1829 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 1830 assert(V != Induction && "The new induction variable should not be used."); 1831 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 1832 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1833 1834 // If we have a stride that is replaced by one, do it here. Defer this for 1835 // the VPlan-native path until we start running Legal checks in that path. 1836 if (!EnableVPlanNativePath && Legal->hasStride(V)) 1837 V = ConstantInt::get(V->getType(), 1); 1838 1839 // If we have a vector mapped to this value, return it. 1840 if (VectorLoopValueMap.hasVectorValue(V, Part)) 1841 return VectorLoopValueMap.getVectorValue(V, Part); 1842 1843 // If the value has not been vectorized, check if it has been scalarized 1844 // instead. If it has been scalarized, and we actually need the value in 1845 // vector form, we will construct the vector values on demand. 1846 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 1847 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 1848 1849 // If we've scalarized a value, that value should be an instruction. 1850 auto *I = cast<Instruction>(V); 1851 1852 // If we aren't vectorizing, we can just copy the scalar map values over to 1853 // the vector map. 1854 if (VF == 1) { 1855 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 1856 return ScalarValue; 1857 } 1858 1859 // Get the last scalar instruction we generated for V and Part. If the value 1860 // is known to be uniform after vectorization, this corresponds to lane zero 1861 // of the Part unroll iteration. Otherwise, the last instruction is the one 1862 // we created for the last vector lane of the Part unroll iteration. 1863 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 1864 auto *LastInst = cast<Instruction>( 1865 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 1866 1867 // Set the insert point after the last scalarized instruction. This ensures 1868 // the insertelement sequence will directly follow the scalar definitions. 1869 auto OldIP = Builder.saveIP(); 1870 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 1871 Builder.SetInsertPoint(&*NewIP); 1872 1873 // However, if we are vectorizing, we need to construct the vector values. 1874 // If the value is known to be uniform after vectorization, we can just 1875 // broadcast the scalar value corresponding to lane zero for each unroll 1876 // iteration. Otherwise, we construct the vector values using insertelement 1877 // instructions. Since the resulting vectors are stored in 1878 // VectorLoopValueMap, we will only generate the insertelements once. 1879 Value *VectorValue = nullptr; 1880 if (Cost->isUniformAfterVectorization(I, VF)) { 1881 VectorValue = getBroadcastInstrs(ScalarValue); 1882 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 1883 } else { 1884 // Initialize packing with insertelements to start from undef. 1885 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 1886 VectorLoopValueMap.setVectorValue(V, Part, Undef); 1887 for (unsigned Lane = 0; Lane < VF; ++Lane) 1888 packScalarIntoVectorValue(V, {Part, Lane}); 1889 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 1890 } 1891 Builder.restoreIP(OldIP); 1892 return VectorValue; 1893 } 1894 1895 // If this scalar is unknown, assume that it is a constant or that it is 1896 // loop invariant. Broadcast V and save the value for future uses. 1897 Value *B = getBroadcastInstrs(V); 1898 VectorLoopValueMap.setVectorValue(V, Part, B); 1899 return B; 1900 } 1901 1902 Value * 1903 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 1904 const VPIteration &Instance) { 1905 // If the value is not an instruction contained in the loop, it should 1906 // already be scalar. 1907 if (OrigLoop->isLoopInvariant(V)) 1908 return V; 1909 1910 assert(Instance.Lane > 0 1911 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 1912 : true && "Uniform values only have lane zero"); 1913 1914 // If the value from the original loop has not been vectorized, it is 1915 // represented by UF x VF scalar values in the new loop. Return the requested 1916 // scalar value. 1917 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 1918 return VectorLoopValueMap.getScalarValue(V, Instance); 1919 1920 // If the value has not been scalarized, get its entry in VectorLoopValueMap 1921 // for the given unroll part. If this entry is not a vector type (i.e., the 1922 // vectorization factor is one), there is no need to generate an 1923 // extractelement instruction. 1924 auto *U = getOrCreateVectorValue(V, Instance.Part); 1925 if (!U->getType()->isVectorTy()) { 1926 assert(VF == 1 && "Value not scalarized has non-vector type"); 1927 return U; 1928 } 1929 1930 // Otherwise, the value from the original loop has been vectorized and is 1931 // represented by UF vector values. Extract and return the requested scalar 1932 // value from the appropriate vector lane. 1933 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 1934 } 1935 1936 void InnerLoopVectorizer::packScalarIntoVectorValue( 1937 Value *V, const VPIteration &Instance) { 1938 assert(V != Induction && "The new induction variable should not be used."); 1939 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 1940 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1941 1942 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 1943 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 1944 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 1945 Builder.getInt32(Instance.Lane)); 1946 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 1947 } 1948 1949 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 1950 assert(Vec->getType()->isVectorTy() && "Invalid type"); 1951 SmallVector<Constant *, 8> ShuffleMask; 1952 for (unsigned i = 0; i < VF; ++i) 1953 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 1954 1955 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 1956 ConstantVector::get(ShuffleMask), 1957 "reverse"); 1958 } 1959 1960 // Return whether we allow using masked interleave-groups (for dealing with 1961 // strided loads/stores that reside in predicated blocks, or for dealing 1962 // with gaps). 1963 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 1964 // If an override option has been passed in for interleaved accesses, use it. 1965 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 1966 return EnableMaskedInterleavedMemAccesses; 1967 1968 return TTI.enableMaskedInterleavedAccessVectorization(); 1969 } 1970 1971 // Try to vectorize the interleave group that \p Instr belongs to. 1972 // 1973 // E.g. Translate following interleaved load group (factor = 3): 1974 // for (i = 0; i < N; i+=3) { 1975 // R = Pic[i]; // Member of index 0 1976 // G = Pic[i+1]; // Member of index 1 1977 // B = Pic[i+2]; // Member of index 2 1978 // ... // do something to R, G, B 1979 // } 1980 // To: 1981 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 1982 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 1983 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 1984 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 1985 // 1986 // Or translate following interleaved store group (factor = 3): 1987 // for (i = 0; i < N; i+=3) { 1988 // ... do something to R, G, B 1989 // Pic[i] = R; // Member of index 0 1990 // Pic[i+1] = G; // Member of index 1 1991 // Pic[i+2] = B; // Member of index 2 1992 // } 1993 // To: 1994 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 1995 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 1996 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 1997 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 1998 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 1999 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr, 2000 VectorParts *BlockInMask) { 2001 const InterleaveGroup<Instruction> *Group = 2002 Cost->getInterleavedAccessGroup(Instr); 2003 assert(Group && "Fail to get an interleaved access group."); 2004 2005 // Skip if current instruction is not the insert position. 2006 if (Instr != Group->getInsertPos()) 2007 return; 2008 2009 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2010 Value *Ptr = getLoadStorePointerOperand(Instr); 2011 2012 // Prepare for the vector type of the interleaved load/store. 2013 Type *ScalarTy = getMemInstValueType(Instr); 2014 unsigned InterleaveFactor = Group->getFactor(); 2015 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2016 Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr)); 2017 2018 // Prepare for the new pointers. 2019 setDebugLocFromInst(Builder, Ptr); 2020 SmallVector<Value *, 2> NewPtrs; 2021 unsigned Index = Group->getIndex(Instr); 2022 2023 VectorParts Mask; 2024 bool IsMaskForCondRequired = BlockInMask; 2025 if (IsMaskForCondRequired) { 2026 Mask = *BlockInMask; 2027 // TODO: extend the masked interleaved-group support to reversed access. 2028 assert(!Group->isReverse() && "Reversed masked interleave-group " 2029 "not supported."); 2030 } 2031 2032 // If the group is reverse, adjust the index to refer to the last vector lane 2033 // instead of the first. We adjust the index from the first vector lane, 2034 // rather than directly getting the pointer for lane VF - 1, because the 2035 // pointer operand of the interleaved access is supposed to be uniform. For 2036 // uniform instructions, we're only required to generate a value for the 2037 // first vector lane in each unroll iteration. 2038 if (Group->isReverse()) 2039 Index += (VF - 1) * Group->getFactor(); 2040 2041 bool InBounds = false; 2042 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2043 InBounds = gep->isInBounds(); 2044 2045 for (unsigned Part = 0; Part < UF; Part++) { 2046 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0}); 2047 2048 // Notice current instruction could be any index. Need to adjust the address 2049 // to the member of index 0. 2050 // 2051 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2052 // b = A[i]; // Member of index 0 2053 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2054 // 2055 // E.g. A[i+1] = a; // Member of index 1 2056 // A[i] = b; // Member of index 0 2057 // A[i+2] = c; // Member of index 2 (Current instruction) 2058 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2059 NewPtr = Builder.CreateGEP(ScalarTy, NewPtr, Builder.getInt32(-Index)); 2060 if (InBounds) 2061 cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true); 2062 2063 // Cast to the vector pointer type. 2064 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2065 } 2066 2067 setDebugLocFromInst(Builder, Instr); 2068 Value *UndefVec = UndefValue::get(VecTy); 2069 2070 Value *MaskForGaps = nullptr; 2071 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2072 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2073 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2074 } 2075 2076 // Vectorize the interleaved load group. 2077 if (isa<LoadInst>(Instr)) { 2078 // For each unroll part, create a wide load for the group. 2079 SmallVector<Value *, 2> NewLoads; 2080 for (unsigned Part = 0; Part < UF; Part++) { 2081 Instruction *NewLoad; 2082 if (IsMaskForCondRequired || MaskForGaps) { 2083 assert(useMaskedInterleavedAccesses(*TTI) && 2084 "masked interleaved groups are not allowed."); 2085 Value *GroupMask = MaskForGaps; 2086 if (IsMaskForCondRequired) { 2087 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2088 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2089 Value *ShuffledMask = Builder.CreateShuffleVector( 2090 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2091 GroupMask = MaskForGaps 2092 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2093 MaskForGaps) 2094 : ShuffledMask; 2095 } 2096 NewLoad = 2097 Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(), 2098 GroupMask, UndefVec, "wide.masked.vec"); 2099 } 2100 else 2101 NewLoad = Builder.CreateAlignedLoad(VecTy, NewPtrs[Part], 2102 Group->getAlignment(), "wide.vec"); 2103 Group->addMetadata(NewLoad); 2104 NewLoads.push_back(NewLoad); 2105 } 2106 2107 // For each member in the group, shuffle out the appropriate data from the 2108 // wide loads. 2109 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2110 Instruction *Member = Group->getMember(I); 2111 2112 // Skip the gaps in the group. 2113 if (!Member) 2114 continue; 2115 2116 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF); 2117 for (unsigned Part = 0; Part < UF; Part++) { 2118 Value *StridedVec = Builder.CreateShuffleVector( 2119 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2120 2121 // If this member has different type, cast the result type. 2122 if (Member->getType() != ScalarTy) { 2123 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2124 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2125 } 2126 2127 if (Group->isReverse()) 2128 StridedVec = reverseVector(StridedVec); 2129 2130 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2131 } 2132 } 2133 return; 2134 } 2135 2136 // The sub vector type for current instruction. 2137 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2138 2139 // Vectorize the interleaved store group. 2140 for (unsigned Part = 0; Part < UF; Part++) { 2141 // Collect the stored vector from each member. 2142 SmallVector<Value *, 4> StoredVecs; 2143 for (unsigned i = 0; i < InterleaveFactor; i++) { 2144 // Interleaved store group doesn't allow a gap, so each index has a member 2145 Instruction *Member = Group->getMember(i); 2146 assert(Member && "Fail to get a member from an interleaved store group"); 2147 2148 Value *StoredVec = getOrCreateVectorValue( 2149 cast<StoreInst>(Member)->getValueOperand(), Part); 2150 if (Group->isReverse()) 2151 StoredVec = reverseVector(StoredVec); 2152 2153 // If this member has different type, cast it to a unified type. 2154 2155 if (StoredVec->getType() != SubVT) 2156 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2157 2158 StoredVecs.push_back(StoredVec); 2159 } 2160 2161 // Concatenate all vectors into a wide vector. 2162 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2163 2164 // Interleave the elements in the wide vector. 2165 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2166 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2167 "interleaved.vec"); 2168 2169 Instruction *NewStoreInstr; 2170 if (IsMaskForCondRequired) { 2171 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2172 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2173 Value *ShuffledMask = Builder.CreateShuffleVector( 2174 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2175 NewStoreInstr = Builder.CreateMaskedStore( 2176 IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask); 2177 } 2178 else 2179 NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part], 2180 Group->getAlignment()); 2181 2182 Group->addMetadata(NewStoreInstr); 2183 } 2184 } 2185 2186 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2187 VectorParts *BlockInMask) { 2188 // Attempt to issue a wide load. 2189 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2190 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2191 2192 assert((LI || SI) && "Invalid Load/Store instruction"); 2193 2194 LoopVectorizationCostModel::InstWidening Decision = 2195 Cost->getWideningDecision(Instr, VF); 2196 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2197 "CM decision should be taken at this point"); 2198 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2199 return vectorizeInterleaveGroup(Instr); 2200 2201 Type *ScalarDataTy = getMemInstValueType(Instr); 2202 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2203 Value *Ptr = getLoadStorePointerOperand(Instr); 2204 unsigned Alignment = getLoadStoreAlignment(Instr); 2205 // An alignment of 0 means target abi alignment. We need to use the scalar's 2206 // target abi alignment in such a case. 2207 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2208 if (!Alignment) 2209 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2210 unsigned AddressSpace = getLoadStoreAddressSpace(Instr); 2211 2212 // Determine if the pointer operand of the access is either consecutive or 2213 // reverse consecutive. 2214 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2215 bool ConsecutiveStride = 2216 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2217 bool CreateGatherScatter = 2218 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2219 2220 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2221 // gather/scatter. Otherwise Decision should have been to Scalarize. 2222 assert((ConsecutiveStride || CreateGatherScatter) && 2223 "The instruction should be scalarized"); 2224 2225 // Handle consecutive loads/stores. 2226 if (ConsecutiveStride) 2227 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 2228 2229 VectorParts Mask; 2230 bool isMaskRequired = BlockInMask; 2231 if (isMaskRequired) 2232 Mask = *BlockInMask; 2233 2234 bool InBounds = false; 2235 if (auto *gep = dyn_cast<GetElementPtrInst>( 2236 getLoadStorePointerOperand(Instr)->stripPointerCasts())) 2237 InBounds = gep->isInBounds(); 2238 2239 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2240 // Calculate the pointer for the specific unroll-part. 2241 GetElementPtrInst *PartPtr = nullptr; 2242 2243 if (Reverse) { 2244 // If the address is consecutive but reversed, then the 2245 // wide store needs to start at the last vector element. 2246 PartPtr = cast<GetElementPtrInst>( 2247 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2248 PartPtr->setIsInBounds(InBounds); 2249 PartPtr = cast<GetElementPtrInst>( 2250 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2251 PartPtr->setIsInBounds(InBounds); 2252 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2253 Mask[Part] = reverseVector(Mask[Part]); 2254 } else { 2255 PartPtr = cast<GetElementPtrInst>( 2256 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2257 PartPtr->setIsInBounds(InBounds); 2258 } 2259 2260 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2261 }; 2262 2263 // Handle Stores: 2264 if (SI) { 2265 setDebugLocFromInst(Builder, SI); 2266 2267 for (unsigned Part = 0; Part < UF; ++Part) { 2268 Instruction *NewSI = nullptr; 2269 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 2270 if (CreateGatherScatter) { 2271 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2272 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2273 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2274 MaskPart); 2275 } else { 2276 if (Reverse) { 2277 // If we store to reverse consecutive memory locations, then we need 2278 // to reverse the order of elements in the stored value. 2279 StoredVal = reverseVector(StoredVal); 2280 // We don't want to update the value in the map as it might be used in 2281 // another expression. So don't call resetVectorValue(StoredVal). 2282 } 2283 auto *VecPtr = CreateVecPtr(Part, Ptr); 2284 if (isMaskRequired) 2285 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2286 Mask[Part]); 2287 else 2288 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2289 } 2290 addMetadata(NewSI, SI); 2291 } 2292 return; 2293 } 2294 2295 // Handle loads. 2296 assert(LI && "Must have a load instruction"); 2297 setDebugLocFromInst(Builder, LI); 2298 for (unsigned Part = 0; Part < UF; ++Part) { 2299 Value *NewLI; 2300 if (CreateGatherScatter) { 2301 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2302 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2303 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2304 nullptr, "wide.masked.gather"); 2305 addMetadata(NewLI, LI); 2306 } else { 2307 auto *VecPtr = CreateVecPtr(Part, Ptr); 2308 if (isMaskRequired) 2309 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2310 UndefValue::get(DataTy), 2311 "wide.masked.load"); 2312 else 2313 NewLI = 2314 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2315 2316 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2317 addMetadata(NewLI, LI); 2318 if (Reverse) 2319 NewLI = reverseVector(NewLI); 2320 } 2321 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2322 } 2323 } 2324 2325 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2326 const VPIteration &Instance, 2327 bool IfPredicateInstr) { 2328 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2329 2330 setDebugLocFromInst(Builder, Instr); 2331 2332 // Does this instruction return a value ? 2333 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2334 2335 Instruction *Cloned = Instr->clone(); 2336 if (!IsVoidRetTy) 2337 Cloned->setName(Instr->getName() + ".cloned"); 2338 2339 // Replace the operands of the cloned instructions with their scalar 2340 // equivalents in the new loop. 2341 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2342 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2343 Cloned->setOperand(op, NewOp); 2344 } 2345 addNewMetadata(Cloned, Instr); 2346 2347 // Place the cloned scalar in the new loop. 2348 Builder.Insert(Cloned); 2349 2350 // Add the cloned scalar to the scalar map entry. 2351 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2352 2353 // If we just cloned a new assumption, add it the assumption cache. 2354 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2355 if (II->getIntrinsicID() == Intrinsic::assume) 2356 AC->registerAssumption(II); 2357 2358 // End if-block. 2359 if (IfPredicateInstr) 2360 PredicatedInstructions.push_back(Cloned); 2361 } 2362 2363 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2364 Value *End, Value *Step, 2365 Instruction *DL) { 2366 BasicBlock *Header = L->getHeader(); 2367 BasicBlock *Latch = L->getLoopLatch(); 2368 // As we're just creating this loop, it's possible no latch exists 2369 // yet. If so, use the header as this will be a single block loop. 2370 if (!Latch) 2371 Latch = Header; 2372 2373 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2374 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2375 setDebugLocFromInst(Builder, OldInst); 2376 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2377 2378 Builder.SetInsertPoint(Latch->getTerminator()); 2379 setDebugLocFromInst(Builder, OldInst); 2380 2381 // Create i+1 and fill the PHINode. 2382 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2383 Induction->addIncoming(Start, L->getLoopPreheader()); 2384 Induction->addIncoming(Next, Latch); 2385 // Create the compare. 2386 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2387 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2388 2389 // Now we have two terminators. Remove the old one from the block. 2390 Latch->getTerminator()->eraseFromParent(); 2391 2392 return Induction; 2393 } 2394 2395 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2396 if (TripCount) 2397 return TripCount; 2398 2399 assert(L && "Create Trip Count for null loop."); 2400 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2401 // Find the loop boundaries. 2402 ScalarEvolution *SE = PSE.getSE(); 2403 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2404 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2405 "Invalid loop count"); 2406 2407 Type *IdxTy = Legal->getWidestInductionType(); 2408 assert(IdxTy && "No type for induction"); 2409 2410 // The exit count might have the type of i64 while the phi is i32. This can 2411 // happen if we have an induction variable that is sign extended before the 2412 // compare. The only way that we get a backedge taken count is that the 2413 // induction variable was signed and as such will not overflow. In such a case 2414 // truncation is legal. 2415 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2416 IdxTy->getPrimitiveSizeInBits()) 2417 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2418 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2419 2420 // Get the total trip count from the count by adding 1. 2421 const SCEV *ExitCount = SE->getAddExpr( 2422 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2423 2424 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2425 2426 // Expand the trip count and place the new instructions in the preheader. 2427 // Notice that the pre-header does not change, only the loop body. 2428 SCEVExpander Exp(*SE, DL, "induction"); 2429 2430 // Count holds the overall loop count (N). 2431 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2432 L->getLoopPreheader()->getTerminator()); 2433 2434 if (TripCount->getType()->isPointerTy()) 2435 TripCount = 2436 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2437 L->getLoopPreheader()->getTerminator()); 2438 2439 return TripCount; 2440 } 2441 2442 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2443 if (VectorTripCount) 2444 return VectorTripCount; 2445 2446 Value *TC = getOrCreateTripCount(L); 2447 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2448 2449 Type *Ty = TC->getType(); 2450 Constant *Step = ConstantInt::get(Ty, VF * UF); 2451 2452 // If the tail is to be folded by masking, round the number of iterations N 2453 // up to a multiple of Step instead of rounding down. This is done by first 2454 // adding Step-1 and then rounding down. Note that it's ok if this addition 2455 // overflows: the vector induction variable will eventually wrap to zero given 2456 // that it starts at zero and its Step is a power of two; the loop will then 2457 // exit, with the last early-exit vector comparison also producing all-true. 2458 if (Cost->foldTailByMasking()) { 2459 assert(isPowerOf2_32(VF * UF) && 2460 "VF*UF must be a power of 2 when folding tail by masking"); 2461 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2462 } 2463 2464 // Now we need to generate the expression for the part of the loop that the 2465 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2466 // iterations are not required for correctness, or N - Step, otherwise. Step 2467 // is equal to the vectorization factor (number of SIMD elements) times the 2468 // unroll factor (number of SIMD instructions). 2469 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2470 2471 // If there is a non-reversed interleaved group that may speculatively access 2472 // memory out-of-bounds, we need to ensure that there will be at least one 2473 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2474 // the trip count, we set the remainder to be equal to the step. If the step 2475 // does not evenly divide the trip count, no adjustment is necessary since 2476 // there will already be scalar iterations. Note that the minimum iterations 2477 // check ensures that N >= Step. 2478 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2479 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2480 R = Builder.CreateSelect(IsZero, Step, R); 2481 } 2482 2483 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2484 2485 return VectorTripCount; 2486 } 2487 2488 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2489 const DataLayout &DL) { 2490 // Verify that V is a vector type with same number of elements as DstVTy. 2491 unsigned VF = DstVTy->getNumElements(); 2492 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2493 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2494 Type *SrcElemTy = SrcVecTy->getElementType(); 2495 Type *DstElemTy = DstVTy->getElementType(); 2496 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2497 "Vector elements must have same size"); 2498 2499 // Do a direct cast if element types are castable. 2500 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2501 return Builder.CreateBitOrPointerCast(V, DstVTy); 2502 } 2503 // V cannot be directly casted to desired vector type. 2504 // May happen when V is a floating point vector but DstVTy is a vector of 2505 // pointers or vice-versa. Handle this using a two-step bitcast using an 2506 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2507 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2508 "Only one type should be a pointer type"); 2509 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2510 "Only one type should be a floating point type"); 2511 Type *IntTy = 2512 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2513 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2514 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2515 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2516 } 2517 2518 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2519 BasicBlock *Bypass) { 2520 Value *Count = getOrCreateTripCount(L); 2521 BasicBlock *BB = L->getLoopPreheader(); 2522 IRBuilder<> Builder(BB->getTerminator()); 2523 2524 // Generate code to check if the loop's trip count is less than VF * UF, or 2525 // equal to it in case a scalar epilogue is required; this implies that the 2526 // vector trip count is zero. This check also covers the case where adding one 2527 // to the backedge-taken count overflowed leading to an incorrect trip count 2528 // of zero. In this case we will also jump to the scalar loop. 2529 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2530 : ICmpInst::ICMP_ULT; 2531 2532 // If tail is to be folded, vector loop takes care of all iterations. 2533 Value *CheckMinIters = Builder.getFalse(); 2534 if (!Cost->foldTailByMasking()) 2535 CheckMinIters = Builder.CreateICmp( 2536 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2537 "min.iters.check"); 2538 2539 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2540 // Update dominator tree immediately if the generated block is a 2541 // LoopBypassBlock because SCEV expansions to generate loop bypass 2542 // checks may query it before the current function is finished. 2543 DT->addNewBlock(NewBB, BB); 2544 if (L->getParentLoop()) 2545 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2546 ReplaceInstWithInst(BB->getTerminator(), 2547 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2548 LoopBypassBlocks.push_back(BB); 2549 } 2550 2551 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2552 BasicBlock *BB = L->getLoopPreheader(); 2553 2554 // Generate the code to check that the SCEV assumptions that we made. 2555 // We want the new basic block to start at the first instruction in a 2556 // sequence of instructions that form a check. 2557 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2558 "scev.check"); 2559 Value *SCEVCheck = 2560 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 2561 2562 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2563 if (C->isZero()) 2564 return; 2565 2566 assert(!Cost->foldTailByMasking() && 2567 "Cannot SCEV check stride or overflow when folding tail"); 2568 // Create a new block containing the stride check. 2569 BB->setName("vector.scevcheck"); 2570 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2571 // Update dominator tree immediately if the generated block is a 2572 // LoopBypassBlock because SCEV expansions to generate loop bypass 2573 // checks may query it before the current function is finished. 2574 DT->addNewBlock(NewBB, BB); 2575 if (L->getParentLoop()) 2576 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2577 ReplaceInstWithInst(BB->getTerminator(), 2578 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 2579 LoopBypassBlocks.push_back(BB); 2580 AddedSafetyChecks = true; 2581 } 2582 2583 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2584 // VPlan-native path does not do any analysis for runtime checks currently. 2585 if (EnableVPlanNativePath) 2586 return; 2587 2588 BasicBlock *BB = L->getLoopPreheader(); 2589 2590 // Generate the code that checks in runtime if arrays overlap. We put the 2591 // checks into a separate block to make the more common case of few elements 2592 // faster. 2593 Instruction *FirstCheckInst; 2594 Instruction *MemRuntimeCheck; 2595 std::tie(FirstCheckInst, MemRuntimeCheck) = 2596 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2597 if (!MemRuntimeCheck) 2598 return; 2599 2600 assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail"); 2601 // Create a new block containing the memory check. 2602 BB->setName("vector.memcheck"); 2603 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2604 // Update dominator tree immediately if the generated block is a 2605 // LoopBypassBlock because SCEV expansions to generate loop bypass 2606 // checks may query it before the current function is finished. 2607 DT->addNewBlock(NewBB, BB); 2608 if (L->getParentLoop()) 2609 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2610 ReplaceInstWithInst(BB->getTerminator(), 2611 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2612 LoopBypassBlocks.push_back(BB); 2613 AddedSafetyChecks = true; 2614 2615 // We currently don't use LoopVersioning for the actual loop cloning but we 2616 // still use it to add the noalias metadata. 2617 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2618 PSE.getSE()); 2619 LVer->prepareNoAliasMetadata(); 2620 } 2621 2622 Value *InnerLoopVectorizer::emitTransformedIndex( 2623 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2624 const InductionDescriptor &ID) const { 2625 2626 SCEVExpander Exp(*SE, DL, "induction"); 2627 auto Step = ID.getStep(); 2628 auto StartValue = ID.getStartValue(); 2629 assert(Index->getType() == Step->getType() && 2630 "Index type does not match StepValue type"); 2631 2632 // Note: the IR at this point is broken. We cannot use SE to create any new 2633 // SCEV and then expand it, hoping that SCEV's simplification will give us 2634 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2635 // lead to various SCEV crashes. So all we can do is to use builder and rely 2636 // on InstCombine for future simplifications. Here we handle some trivial 2637 // cases only. 2638 auto CreateAdd = [&B](Value *X, Value *Y) { 2639 assert(X->getType() == Y->getType() && "Types don't match!"); 2640 if (auto *CX = dyn_cast<ConstantInt>(X)) 2641 if (CX->isZero()) 2642 return Y; 2643 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2644 if (CY->isZero()) 2645 return X; 2646 return B.CreateAdd(X, Y); 2647 }; 2648 2649 auto CreateMul = [&B](Value *X, Value *Y) { 2650 assert(X->getType() == Y->getType() && "Types don't match!"); 2651 if (auto *CX = dyn_cast<ConstantInt>(X)) 2652 if (CX->isOne()) 2653 return Y; 2654 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2655 if (CY->isOne()) 2656 return X; 2657 return B.CreateMul(X, Y); 2658 }; 2659 2660 switch (ID.getKind()) { 2661 case InductionDescriptor::IK_IntInduction: { 2662 assert(Index->getType() == StartValue->getType() && 2663 "Index type does not match StartValue type"); 2664 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2665 return B.CreateSub(StartValue, Index); 2666 auto *Offset = CreateMul( 2667 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2668 return CreateAdd(StartValue, Offset); 2669 } 2670 case InductionDescriptor::IK_PtrInduction: { 2671 assert(isa<SCEVConstant>(Step) && 2672 "Expected constant step for pointer induction"); 2673 return B.CreateGEP( 2674 StartValue->getType()->getPointerElementType(), StartValue, 2675 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2676 &*B.GetInsertPoint()))); 2677 } 2678 case InductionDescriptor::IK_FpInduction: { 2679 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2680 auto InductionBinOp = ID.getInductionBinOp(); 2681 assert(InductionBinOp && 2682 (InductionBinOp->getOpcode() == Instruction::FAdd || 2683 InductionBinOp->getOpcode() == Instruction::FSub) && 2684 "Original bin op should be defined for FP induction"); 2685 2686 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2687 2688 // Floating point operations had to be 'fast' to enable the induction. 2689 FastMathFlags Flags; 2690 Flags.setFast(); 2691 2692 Value *MulExp = B.CreateFMul(StepValue, Index); 2693 if (isa<Instruction>(MulExp)) 2694 // We have to check, the MulExp may be a constant. 2695 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2696 2697 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2698 "induction"); 2699 if (isa<Instruction>(BOp)) 2700 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2701 2702 return BOp; 2703 } 2704 case InductionDescriptor::IK_NoInduction: 2705 return nullptr; 2706 } 2707 llvm_unreachable("invalid enum"); 2708 } 2709 2710 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2711 /* 2712 In this function we generate a new loop. The new loop will contain 2713 the vectorized instructions while the old loop will continue to run the 2714 scalar remainder. 2715 2716 [ ] <-- loop iteration number check. 2717 / | 2718 / v 2719 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2720 | / | 2721 | / v 2722 || [ ] <-- vector pre header. 2723 |/ | 2724 | v 2725 | [ ] \ 2726 | [ ]_| <-- vector loop. 2727 | | 2728 | v 2729 | -[ ] <--- middle-block. 2730 | / | 2731 | / v 2732 -|- >[ ] <--- new preheader. 2733 | | 2734 | v 2735 | [ ] \ 2736 | [ ]_| <-- old scalar loop to handle remainder. 2737 \ | 2738 \ v 2739 >[ ] <-- exit block. 2740 ... 2741 */ 2742 2743 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2744 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2745 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2746 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2747 assert(VectorPH && "Invalid loop structure"); 2748 assert(ExitBlock && "Must have an exit block"); 2749 2750 // Some loops have a single integer induction variable, while other loops 2751 // don't. One example is c++ iterators that often have multiple pointer 2752 // induction variables. In the code below we also support a case where we 2753 // don't have a single induction variable. 2754 // 2755 // We try to obtain an induction variable from the original loop as hard 2756 // as possible. However if we don't find one that: 2757 // - is an integer 2758 // - counts from zero, stepping by one 2759 // - is the size of the widest induction variable type 2760 // then we create a new one. 2761 OldInduction = Legal->getPrimaryInduction(); 2762 Type *IdxTy = Legal->getWidestInductionType(); 2763 2764 // Split the single block loop into the two loop structure described above. 2765 BasicBlock *VecBody = 2766 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2767 BasicBlock *MiddleBlock = 2768 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2769 BasicBlock *ScalarPH = 2770 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2771 2772 // Create and register the new vector loop. 2773 Loop *Lp = LI->AllocateLoop(); 2774 Loop *ParentLoop = OrigLoop->getParentLoop(); 2775 2776 // Insert the new loop into the loop nest and register the new basic blocks 2777 // before calling any utilities such as SCEV that require valid LoopInfo. 2778 if (ParentLoop) { 2779 ParentLoop->addChildLoop(Lp); 2780 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2781 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2782 } else { 2783 LI->addTopLevelLoop(Lp); 2784 } 2785 Lp->addBasicBlockToLoop(VecBody, *LI); 2786 2787 // Find the loop boundaries. 2788 Value *Count = getOrCreateTripCount(Lp); 2789 2790 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2791 2792 // Now, compare the new count to zero. If it is zero skip the vector loop and 2793 // jump to the scalar loop. This check also covers the case where the 2794 // backedge-taken count is uint##_max: adding one to it will overflow leading 2795 // to an incorrect trip count of zero. In this (rare) case we will also jump 2796 // to the scalar loop. 2797 emitMinimumIterationCountCheck(Lp, ScalarPH); 2798 2799 // Generate the code to check any assumptions that we've made for SCEV 2800 // expressions. 2801 emitSCEVChecks(Lp, ScalarPH); 2802 2803 // Generate the code that checks in runtime if arrays overlap. We put the 2804 // checks into a separate block to make the more common case of few elements 2805 // faster. 2806 emitMemRuntimeChecks(Lp, ScalarPH); 2807 2808 // Generate the induction variable. 2809 // The loop step is equal to the vectorization factor (num of SIMD elements) 2810 // times the unroll factor (num of SIMD instructions). 2811 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 2812 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 2813 Induction = 2814 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 2815 getDebugLocFromInstOrOperands(OldInduction)); 2816 2817 // We are going to resume the execution of the scalar loop. 2818 // Go over all of the induction variables that we found and fix the 2819 // PHIs that are left in the scalar version of the loop. 2820 // The starting values of PHI nodes depend on the counter of the last 2821 // iteration in the vectorized loop. 2822 // If we come from a bypass edge then we need to start from the original 2823 // start value. 2824 2825 // This variable saves the new starting index for the scalar loop. It is used 2826 // to test if there are any tail iterations left once the vector loop has 2827 // completed. 2828 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2829 for (auto &InductionEntry : *List) { 2830 PHINode *OrigPhi = InductionEntry.first; 2831 InductionDescriptor II = InductionEntry.second; 2832 2833 // Create phi nodes to merge from the backedge-taken check block. 2834 PHINode *BCResumeVal = PHINode::Create( 2835 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 2836 // Copy original phi DL over to the new one. 2837 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 2838 Value *&EndValue = IVEndValues[OrigPhi]; 2839 if (OrigPhi == OldInduction) { 2840 // We know what the end value is. 2841 EndValue = CountRoundDown; 2842 } else { 2843 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 2844 Type *StepType = II.getStep()->getType(); 2845 Instruction::CastOps CastOp = 2846 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 2847 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 2848 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2849 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 2850 EndValue->setName("ind.end"); 2851 } 2852 2853 // The new PHI merges the original incoming value, in case of a bypass, 2854 // or the value at the end of the vectorized loop. 2855 BCResumeVal->addIncoming(EndValue, MiddleBlock); 2856 2857 // Fix the scalar body counter (PHI node). 2858 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 2859 2860 // The old induction's phi node in the scalar body needs the truncated 2861 // value. 2862 for (BasicBlock *BB : LoopBypassBlocks) 2863 BCResumeVal->addIncoming(II.getStartValue(), BB); 2864 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 2865 } 2866 2867 // We need the OrigLoop (scalar loop part) latch terminator to help 2868 // produce correct debug info for the middle block BB instructions. 2869 // The legality check stage guarantees that the loop will have a single 2870 // latch. 2871 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) && 2872 "Scalar loop latch terminator isn't a branch"); 2873 BranchInst *ScalarLatchBr = 2874 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()); 2875 2876 // Add a check in the middle block to see if we have completed 2877 // all of the iterations in the first vector loop. 2878 // If (N - N%VF) == N, then we *don't* need to run the remainder. 2879 // If tail is to be folded, we know we don't need to run the remainder. 2880 Value *CmpN = Builder.getTrue(); 2881 if (!Cost->foldTailByMasking()) { 2882 CmpN = 2883 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 2884 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 2885 2886 // Provide correct stepping behaviour by using the same DebugLoc as the 2887 // scalar loop latch branch cmp if it exists. 2888 if (CmpInst *ScalarLatchCmp = 2889 dyn_cast_or_null<CmpInst>(ScalarLatchBr->getCondition())) 2890 cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchCmp->getDebugLoc()); 2891 } 2892 2893 BranchInst *BrInst = BranchInst::Create(ExitBlock, ScalarPH, CmpN); 2894 BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc()); 2895 ReplaceInstWithInst(MiddleBlock->getTerminator(), BrInst); 2896 2897 // Get ready to start creating new instructions into the vectorized body. 2898 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 2899 2900 // Save the state. 2901 LoopVectorPreHeader = Lp->getLoopPreheader(); 2902 LoopScalarPreHeader = ScalarPH; 2903 LoopMiddleBlock = MiddleBlock; 2904 LoopExitBlock = ExitBlock; 2905 LoopVectorBody = VecBody; 2906 LoopScalarBody = OldBasicBlock; 2907 2908 Optional<MDNode *> VectorizedLoopID = 2909 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 2910 LLVMLoopVectorizeFollowupVectorized}); 2911 if (VectorizedLoopID.hasValue()) { 2912 Lp->setLoopID(VectorizedLoopID.getValue()); 2913 2914 // Do not setAlreadyVectorized if loop attributes have been defined 2915 // explicitly. 2916 return LoopVectorPreHeader; 2917 } 2918 2919 // Keep all loop hints from the original loop on the vector loop (we'll 2920 // replace the vectorizer-specific hints below). 2921 if (MDNode *LID = OrigLoop->getLoopID()) 2922 Lp->setLoopID(LID); 2923 2924 LoopVectorizeHints Hints(Lp, true, *ORE); 2925 Hints.setAlreadyVectorized(); 2926 2927 return LoopVectorPreHeader; 2928 } 2929 2930 // Fix up external users of the induction variable. At this point, we are 2931 // in LCSSA form, with all external PHIs that use the IV having one input value, 2932 // coming from the remainder loop. We need those PHIs to also have a correct 2933 // value for the IV when arriving directly from the middle block. 2934 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 2935 const InductionDescriptor &II, 2936 Value *CountRoundDown, Value *EndValue, 2937 BasicBlock *MiddleBlock) { 2938 // There are two kinds of external IV usages - those that use the value 2939 // computed in the last iteration (the PHI) and those that use the penultimate 2940 // value (the value that feeds into the phi from the loop latch). 2941 // We allow both, but they, obviously, have different values. 2942 2943 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 2944 2945 DenseMap<Value *, Value *> MissingVals; 2946 2947 // An external user of the last iteration's value should see the value that 2948 // the remainder loop uses to initialize its own IV. 2949 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 2950 for (User *U : PostInc->users()) { 2951 Instruction *UI = cast<Instruction>(U); 2952 if (!OrigLoop->contains(UI)) { 2953 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 2954 MissingVals[UI] = EndValue; 2955 } 2956 } 2957 2958 // An external user of the penultimate value need to see EndValue - Step. 2959 // The simplest way to get this is to recompute it from the constituent SCEVs, 2960 // that is Start + (Step * (CRD - 1)). 2961 for (User *U : OrigPhi->users()) { 2962 auto *UI = cast<Instruction>(U); 2963 if (!OrigLoop->contains(UI)) { 2964 const DataLayout &DL = 2965 OrigLoop->getHeader()->getModule()->getDataLayout(); 2966 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 2967 2968 IRBuilder<> B(MiddleBlock->getTerminator()); 2969 Value *CountMinusOne = B.CreateSub( 2970 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 2971 Value *CMO = 2972 !II.getStep()->getType()->isIntegerTy() 2973 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 2974 II.getStep()->getType()) 2975 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 2976 CMO->setName("cast.cmo"); 2977 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 2978 Escape->setName("ind.escape"); 2979 MissingVals[UI] = Escape; 2980 } 2981 } 2982 2983 for (auto &I : MissingVals) { 2984 PHINode *PHI = cast<PHINode>(I.first); 2985 // One corner case we have to handle is two IVs "chasing" each-other, 2986 // that is %IV2 = phi [...], [ %IV1, %latch ] 2987 // In this case, if IV1 has an external use, we need to avoid adding both 2988 // "last value of IV1" and "penultimate value of IV2". So, verify that we 2989 // don't already have an incoming value for the middle block. 2990 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 2991 PHI->addIncoming(I.second, MiddleBlock); 2992 } 2993 } 2994 2995 namespace { 2996 2997 struct CSEDenseMapInfo { 2998 static bool canHandle(const Instruction *I) { 2999 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3000 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3001 } 3002 3003 static inline Instruction *getEmptyKey() { 3004 return DenseMapInfo<Instruction *>::getEmptyKey(); 3005 } 3006 3007 static inline Instruction *getTombstoneKey() { 3008 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3009 } 3010 3011 static unsigned getHashValue(const Instruction *I) { 3012 assert(canHandle(I) && "Unknown instruction!"); 3013 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3014 I->value_op_end())); 3015 } 3016 3017 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3018 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3019 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3020 return LHS == RHS; 3021 return LHS->isIdenticalTo(RHS); 3022 } 3023 }; 3024 3025 } // end anonymous namespace 3026 3027 ///Perform cse of induction variable instructions. 3028 static void cse(BasicBlock *BB) { 3029 // Perform simple cse. 3030 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3031 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3032 Instruction *In = &*I++; 3033 3034 if (!CSEDenseMapInfo::canHandle(In)) 3035 continue; 3036 3037 // Check if we can replace this instruction with any of the 3038 // visited instructions. 3039 if (Instruction *V = CSEMap.lookup(In)) { 3040 In->replaceAllUsesWith(V); 3041 In->eraseFromParent(); 3042 continue; 3043 } 3044 3045 CSEMap[In] = In; 3046 } 3047 } 3048 3049 /// Estimate the overhead of scalarizing an instruction. This is a 3050 /// convenience wrapper for the type-based getScalarizationOverhead API. 3051 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3052 const TargetTransformInfo &TTI) { 3053 if (VF == 1) 3054 return 0; 3055 3056 unsigned Cost = 0; 3057 Type *RetTy = ToVectorTy(I->getType(), VF); 3058 if (!RetTy->isVoidTy() && 3059 (!isa<LoadInst>(I) || 3060 !TTI.supportsEfficientVectorElementLoadStore())) 3061 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3062 3063 // Some targets keep addresses scalar. 3064 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 3065 return Cost; 3066 3067 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3068 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3069 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3070 } 3071 else if (!isa<StoreInst>(I) || 3072 !TTI.supportsEfficientVectorElementLoadStore()) { 3073 SmallVector<const Value *, 4> Operands(I->operand_values()); 3074 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3075 } 3076 3077 return Cost; 3078 } 3079 3080 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3081 // Return the cost of the instruction, including scalarization overhead if it's 3082 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3083 // i.e. either vector version isn't available, or is too expensive. 3084 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3085 const TargetTransformInfo &TTI, 3086 const TargetLibraryInfo *TLI, 3087 bool &NeedToScalarize) { 3088 Function *F = CI->getCalledFunction(); 3089 StringRef FnName = CI->getCalledFunction()->getName(); 3090 Type *ScalarRetTy = CI->getType(); 3091 SmallVector<Type *, 4> Tys, ScalarTys; 3092 for (auto &ArgOp : CI->arg_operands()) 3093 ScalarTys.push_back(ArgOp->getType()); 3094 3095 // Estimate cost of scalarized vector call. The source operands are assumed 3096 // to be vectors, so we need to extract individual elements from there, 3097 // execute VF scalar calls, and then gather the result into the vector return 3098 // value. 3099 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3100 if (VF == 1) 3101 return ScalarCallCost; 3102 3103 // Compute corresponding vector type for return value and arguments. 3104 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3105 for (Type *ScalarTy : ScalarTys) 3106 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3107 3108 // Compute costs of unpacking argument values for the scalar calls and 3109 // packing the return values to a vector. 3110 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3111 3112 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3113 3114 // If we can't emit a vector call for this function, then the currently found 3115 // cost is the cost we need to return. 3116 NeedToScalarize = true; 3117 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3118 return Cost; 3119 3120 // If the corresponding vector cost is cheaper, return its cost. 3121 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3122 if (VectorCallCost < Cost) { 3123 NeedToScalarize = false; 3124 return VectorCallCost; 3125 } 3126 return Cost; 3127 } 3128 3129 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3130 // factor VF. Return the cost of the instruction, including scalarization 3131 // overhead if it's needed. 3132 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3133 const TargetTransformInfo &TTI, 3134 const TargetLibraryInfo *TLI) { 3135 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3136 assert(ID && "Expected intrinsic call!"); 3137 3138 FastMathFlags FMF; 3139 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3140 FMF = FPMO->getFastMathFlags(); 3141 3142 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3143 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3144 } 3145 3146 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3147 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3148 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3149 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3150 } 3151 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3152 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3153 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3154 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3155 } 3156 3157 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3158 // For every instruction `I` in MinBWs, truncate the operands, create a 3159 // truncated version of `I` and reextend its result. InstCombine runs 3160 // later and will remove any ext/trunc pairs. 3161 SmallPtrSet<Value *, 4> Erased; 3162 for (const auto &KV : Cost->getMinimalBitwidths()) { 3163 // If the value wasn't vectorized, we must maintain the original scalar 3164 // type. The absence of the value from VectorLoopValueMap indicates that it 3165 // wasn't vectorized. 3166 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3167 continue; 3168 for (unsigned Part = 0; Part < UF; ++Part) { 3169 Value *I = getOrCreateVectorValue(KV.first, Part); 3170 if (Erased.find(I) != Erased.end() || I->use_empty() || 3171 !isa<Instruction>(I)) 3172 continue; 3173 Type *OriginalTy = I->getType(); 3174 Type *ScalarTruncatedTy = 3175 IntegerType::get(OriginalTy->getContext(), KV.second); 3176 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3177 OriginalTy->getVectorNumElements()); 3178 if (TruncatedTy == OriginalTy) 3179 continue; 3180 3181 IRBuilder<> B(cast<Instruction>(I)); 3182 auto ShrinkOperand = [&](Value *V) -> Value * { 3183 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3184 if (ZI->getSrcTy() == TruncatedTy) 3185 return ZI->getOperand(0); 3186 return B.CreateZExtOrTrunc(V, TruncatedTy); 3187 }; 3188 3189 // The actual instruction modification depends on the instruction type, 3190 // unfortunately. 3191 Value *NewI = nullptr; 3192 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3193 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3194 ShrinkOperand(BO->getOperand(1))); 3195 3196 // Any wrapping introduced by shrinking this operation shouldn't be 3197 // considered undefined behavior. So, we can't unconditionally copy 3198 // arithmetic wrapping flags to NewI. 3199 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3200 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3201 NewI = 3202 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3203 ShrinkOperand(CI->getOperand(1))); 3204 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3205 NewI = B.CreateSelect(SI->getCondition(), 3206 ShrinkOperand(SI->getTrueValue()), 3207 ShrinkOperand(SI->getFalseValue())); 3208 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3209 switch (CI->getOpcode()) { 3210 default: 3211 llvm_unreachable("Unhandled cast!"); 3212 case Instruction::Trunc: 3213 NewI = ShrinkOperand(CI->getOperand(0)); 3214 break; 3215 case Instruction::SExt: 3216 NewI = B.CreateSExtOrTrunc( 3217 CI->getOperand(0), 3218 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3219 break; 3220 case Instruction::ZExt: 3221 NewI = B.CreateZExtOrTrunc( 3222 CI->getOperand(0), 3223 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3224 break; 3225 } 3226 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3227 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3228 auto *O0 = B.CreateZExtOrTrunc( 3229 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3230 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3231 auto *O1 = B.CreateZExtOrTrunc( 3232 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3233 3234 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3235 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3236 // Don't do anything with the operands, just extend the result. 3237 continue; 3238 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3239 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3240 auto *O0 = B.CreateZExtOrTrunc( 3241 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3242 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3243 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3244 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3245 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3246 auto *O0 = B.CreateZExtOrTrunc( 3247 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3248 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3249 } else { 3250 // If we don't know what to do, be conservative and don't do anything. 3251 continue; 3252 } 3253 3254 // Lastly, extend the result. 3255 NewI->takeName(cast<Instruction>(I)); 3256 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3257 I->replaceAllUsesWith(Res); 3258 cast<Instruction>(I)->eraseFromParent(); 3259 Erased.insert(I); 3260 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3261 } 3262 } 3263 3264 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3265 for (const auto &KV : Cost->getMinimalBitwidths()) { 3266 // If the value wasn't vectorized, we must maintain the original scalar 3267 // type. The absence of the value from VectorLoopValueMap indicates that it 3268 // wasn't vectorized. 3269 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3270 continue; 3271 for (unsigned Part = 0; Part < UF; ++Part) { 3272 Value *I = getOrCreateVectorValue(KV.first, Part); 3273 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3274 if (Inst && Inst->use_empty()) { 3275 Value *NewI = Inst->getOperand(0); 3276 Inst->eraseFromParent(); 3277 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3278 } 3279 } 3280 } 3281 } 3282 3283 void InnerLoopVectorizer::fixVectorizedLoop() { 3284 // Insert truncates and extends for any truncated instructions as hints to 3285 // InstCombine. 3286 if (VF > 1) 3287 truncateToMinimalBitwidths(); 3288 3289 // Fix widened non-induction PHIs by setting up the PHI operands. 3290 if (OrigPHIsToFix.size()) { 3291 assert(EnableVPlanNativePath && 3292 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3293 fixNonInductionPHIs(); 3294 } 3295 3296 // At this point every instruction in the original loop is widened to a 3297 // vector form. Now we need to fix the recurrences in the loop. These PHI 3298 // nodes are currently empty because we did not want to introduce cycles. 3299 // This is the second stage of vectorizing recurrences. 3300 fixCrossIterationPHIs(); 3301 3302 // Update the dominator tree. 3303 // 3304 // FIXME: After creating the structure of the new loop, the dominator tree is 3305 // no longer up-to-date, and it remains that way until we update it 3306 // here. An out-of-date dominator tree is problematic for SCEV, 3307 // because SCEVExpander uses it to guide code generation. The 3308 // vectorizer use SCEVExpanders in several places. Instead, we should 3309 // keep the dominator tree up-to-date as we go. 3310 updateAnalysis(); 3311 3312 // Fix-up external users of the induction variables. 3313 for (auto &Entry : *Legal->getInductionVars()) 3314 fixupIVUsers(Entry.first, Entry.second, 3315 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3316 IVEndValues[Entry.first], LoopMiddleBlock); 3317 3318 fixLCSSAPHIs(); 3319 for (Instruction *PI : PredicatedInstructions) 3320 sinkScalarOperands(&*PI); 3321 3322 // Remove redundant induction instructions. 3323 cse(LoopVectorBody); 3324 } 3325 3326 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3327 // In order to support recurrences we need to be able to vectorize Phi nodes. 3328 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3329 // stage #2: We now need to fix the recurrences by adding incoming edges to 3330 // the currently empty PHI nodes. At this point every instruction in the 3331 // original loop is widened to a vector form so we can use them to construct 3332 // the incoming edges. 3333 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3334 // Handle first-order recurrences and reductions that need to be fixed. 3335 if (Legal->isFirstOrderRecurrence(&Phi)) 3336 fixFirstOrderRecurrence(&Phi); 3337 else if (Legal->isReductionVariable(&Phi)) 3338 fixReduction(&Phi); 3339 } 3340 } 3341 3342 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3343 // This is the second phase of vectorizing first-order recurrences. An 3344 // overview of the transformation is described below. Suppose we have the 3345 // following loop. 3346 // 3347 // for (int i = 0; i < n; ++i) 3348 // b[i] = a[i] - a[i - 1]; 3349 // 3350 // There is a first-order recurrence on "a". For this loop, the shorthand 3351 // scalar IR looks like: 3352 // 3353 // scalar.ph: 3354 // s_init = a[-1] 3355 // br scalar.body 3356 // 3357 // scalar.body: 3358 // i = phi [0, scalar.ph], [i+1, scalar.body] 3359 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3360 // s2 = a[i] 3361 // b[i] = s2 - s1 3362 // br cond, scalar.body, ... 3363 // 3364 // In this example, s1 is a recurrence because it's value depends on the 3365 // previous iteration. In the first phase of vectorization, we created a 3366 // temporary value for s1. We now complete the vectorization and produce the 3367 // shorthand vector IR shown below (for VF = 4, UF = 1). 3368 // 3369 // vector.ph: 3370 // v_init = vector(..., ..., ..., a[-1]) 3371 // br vector.body 3372 // 3373 // vector.body 3374 // i = phi [0, vector.ph], [i+4, vector.body] 3375 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3376 // v2 = a[i, i+1, i+2, i+3]; 3377 // v3 = vector(v1(3), v2(0, 1, 2)) 3378 // b[i, i+1, i+2, i+3] = v2 - v3 3379 // br cond, vector.body, middle.block 3380 // 3381 // middle.block: 3382 // x = v2(3) 3383 // br scalar.ph 3384 // 3385 // scalar.ph: 3386 // s_init = phi [x, middle.block], [a[-1], otherwise] 3387 // br scalar.body 3388 // 3389 // After execution completes the vector loop, we extract the next value of 3390 // the recurrence (x) to use as the initial value in the scalar loop. 3391 3392 // Get the original loop preheader and single loop latch. 3393 auto *Preheader = OrigLoop->getLoopPreheader(); 3394 auto *Latch = OrigLoop->getLoopLatch(); 3395 3396 // Get the initial and previous values of the scalar recurrence. 3397 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 3398 auto *Previous = Phi->getIncomingValueForBlock(Latch); 3399 3400 // Create a vector from the initial value. 3401 auto *VectorInit = ScalarInit; 3402 if (VF > 1) { 3403 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3404 VectorInit = Builder.CreateInsertElement( 3405 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 3406 Builder.getInt32(VF - 1), "vector.recur.init"); 3407 } 3408 3409 // We constructed a temporary phi node in the first phase of vectorization. 3410 // This phi node will eventually be deleted. 3411 Builder.SetInsertPoint( 3412 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 3413 3414 // Create a phi node for the new recurrence. The current value will either be 3415 // the initial value inserted into a vector or loop-varying vector value. 3416 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3417 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3418 3419 // Get the vectorized previous value of the last part UF - 1. It appears last 3420 // among all unrolled iterations, due to the order of their construction. 3421 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 3422 3423 // Set the insertion point after the previous value if it is an instruction. 3424 // Note that the previous value may have been constant-folded so it is not 3425 // guaranteed to be an instruction in the vector loop. Also, if the previous 3426 // value is a phi node, we should insert after all the phi nodes to avoid 3427 // breaking basic block verification. 3428 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 3429 isa<PHINode>(PreviousLastPart)) 3430 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3431 else 3432 Builder.SetInsertPoint( 3433 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 3434 3435 // We will construct a vector for the recurrence by combining the values for 3436 // the current and previous iterations. This is the required shuffle mask. 3437 SmallVector<Constant *, 8> ShuffleMask(VF); 3438 ShuffleMask[0] = Builder.getInt32(VF - 1); 3439 for (unsigned I = 1; I < VF; ++I) 3440 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 3441 3442 // The vector from which to take the initial value for the current iteration 3443 // (actual or unrolled). Initially, this is the vector phi node. 3444 Value *Incoming = VecPhi; 3445 3446 // Shuffle the current and previous vector and update the vector parts. 3447 for (unsigned Part = 0; Part < UF; ++Part) { 3448 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3449 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3450 auto *Shuffle = 3451 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3452 ConstantVector::get(ShuffleMask)) 3453 : Incoming; 3454 PhiPart->replaceAllUsesWith(Shuffle); 3455 cast<Instruction>(PhiPart)->eraseFromParent(); 3456 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3457 Incoming = PreviousPart; 3458 } 3459 3460 // Fix the latch value of the new recurrence in the vector loop. 3461 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3462 3463 // Extract the last vector element in the middle block. This will be the 3464 // initial value for the recurrence when jumping to the scalar loop. 3465 auto *ExtractForScalar = Incoming; 3466 if (VF > 1) { 3467 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3468 ExtractForScalar = Builder.CreateExtractElement( 3469 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 3470 } 3471 // Extract the second last element in the middle block if the 3472 // Phi is used outside the loop. We need to extract the phi itself 3473 // and not the last element (the phi update in the current iteration). This 3474 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3475 // when the scalar loop is not run at all. 3476 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3477 if (VF > 1) 3478 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3479 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 3480 // When loop is unrolled without vectorizing, initialize 3481 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 3482 // `Incoming`. This is analogous to the vectorized case above: extracting the 3483 // second last element when VF > 1. 3484 else if (UF > 1) 3485 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 3486 3487 // Fix the initial value of the original recurrence in the scalar loop. 3488 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3489 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3490 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3491 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3492 Start->addIncoming(Incoming, BB); 3493 } 3494 3495 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 3496 Phi->setName("scalar.recur"); 3497 3498 // Finally, fix users of the recurrence outside the loop. The users will need 3499 // either the last value of the scalar recurrence or the last value of the 3500 // vector recurrence we extracted in the middle block. Since the loop is in 3501 // LCSSA form, we just need to find all the phi nodes for the original scalar 3502 // recurrence in the exit block, and then add an edge for the middle block. 3503 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3504 if (LCSSAPhi.getIncomingValue(0) == Phi) { 3505 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 3506 } 3507 } 3508 } 3509 3510 void InnerLoopVectorizer::fixReduction(PHINode *Phi) { 3511 Constant *Zero = Builder.getInt32(0); 3512 3513 // Get it's reduction variable descriptor. 3514 assert(Legal->isReductionVariable(Phi) && 3515 "Unable to find the reduction variable"); 3516 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi]; 3517 3518 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind(); 3519 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 3520 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 3521 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind = 3522 RdxDesc.getMinMaxRecurrenceKind(); 3523 setDebugLocFromInst(Builder, ReductionStartValue); 3524 3525 // We need to generate a reduction vector from the incoming scalar. 3526 // To do so, we need to generate the 'identity' vector and override 3527 // one of the elements with the incoming scalar reduction. We need 3528 // to do it in the vector-loop preheader. 3529 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3530 3531 // This is the vector-clone of the value that leaves the loop. 3532 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType(); 3533 3534 // Find the reduction identity variable. Zero for addition, or, xor, 3535 // one for multiplication, -1 for And. 3536 Value *Identity; 3537 Value *VectorStart; 3538 if (RK == RecurrenceDescriptor::RK_IntegerMinMax || 3539 RK == RecurrenceDescriptor::RK_FloatMinMax) { 3540 // MinMax reduction have the start value as their identify. 3541 if (VF == 1) { 3542 VectorStart = Identity = ReductionStartValue; 3543 } else { 3544 VectorStart = Identity = 3545 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident"); 3546 } 3547 } else { 3548 // Handle other reduction kinds: 3549 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 3550 RK, VecTy->getScalarType()); 3551 if (VF == 1) { 3552 Identity = Iden; 3553 // This vector is the Identity vector where the first element is the 3554 // incoming scalar reduction. 3555 VectorStart = ReductionStartValue; 3556 } else { 3557 Identity = ConstantVector::getSplat(VF, Iden); 3558 3559 // This vector is the Identity vector where the first element is the 3560 // incoming scalar reduction. 3561 VectorStart = 3562 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero); 3563 } 3564 } 3565 3566 // Fix the vector-loop phi. 3567 3568 // Reductions do not have to start at zero. They can start with 3569 // any loop invariant values. 3570 BasicBlock *Latch = OrigLoop->getLoopLatch(); 3571 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 3572 for (unsigned Part = 0; Part < UF; ++Part) { 3573 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part); 3574 Value *Val = getOrCreateVectorValue(LoopVal, Part); 3575 // Make sure to add the reduction stat value only to the 3576 // first unroll part. 3577 Value *StartVal = (Part == 0) ? VectorStart : Identity; 3578 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader); 3579 cast<PHINode>(VecRdxPhi) 3580 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3581 } 3582 3583 // Before each round, move the insertion point right between 3584 // the PHIs and the values we are going to write. 3585 // This allows us to write both PHINodes and the extractelement 3586 // instructions. 3587 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3588 3589 setDebugLocFromInst(Builder, LoopExitInst); 3590 3591 // If the vector reduction can be performed in a smaller type, we truncate 3592 // then extend the loop exit value to enable InstCombine to evaluate the 3593 // entire expression in the smaller type. 3594 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3595 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3596 Builder.SetInsertPoint( 3597 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3598 VectorParts RdxParts(UF); 3599 for (unsigned Part = 0; Part < UF; ++Part) { 3600 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3601 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3602 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3603 : Builder.CreateZExt(Trunc, VecTy); 3604 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3605 UI != RdxParts[Part]->user_end();) 3606 if (*UI != Trunc) { 3607 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3608 RdxParts[Part] = Extnd; 3609 } else { 3610 ++UI; 3611 } 3612 } 3613 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3614 for (unsigned Part = 0; Part < UF; ++Part) { 3615 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3616 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3617 } 3618 } 3619 3620 // Reduce all of the unrolled parts into a single vector. 3621 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3622 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3623 setDebugLocFromInst(Builder, ReducedPartRdx); 3624 for (unsigned Part = 1; Part < UF; ++Part) { 3625 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3626 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3627 // Floating point operations had to be 'fast' to enable the reduction. 3628 ReducedPartRdx = addFastMathFlag( 3629 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3630 ReducedPartRdx, "bin.rdx"), 3631 RdxDesc.getFastMathFlags()); 3632 else 3633 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3634 RdxPart); 3635 } 3636 3637 if (VF > 1) { 3638 bool NoNaN = Legal->hasFunNoNaNAttr(); 3639 ReducedPartRdx = 3640 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3641 // If the reduction can be performed in a smaller type, we need to extend 3642 // the reduction to the wider type before we branch to the original loop. 3643 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3644 ReducedPartRdx = 3645 RdxDesc.isSigned() 3646 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3647 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3648 } 3649 3650 // Create a phi node that merges control-flow from the backedge-taken check 3651 // block and the middle block. 3652 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3653 LoopScalarPreHeader->getTerminator()); 3654 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3655 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3656 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3657 3658 // Now, we need to fix the users of the reduction variable 3659 // inside and outside of the scalar remainder loop. 3660 // We know that the loop is in LCSSA form. We need to update the 3661 // PHI nodes in the exit blocks. 3662 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3663 // All PHINodes need to have a single entry edge, or two if 3664 // we already fixed them. 3665 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3666 3667 // We found a reduction value exit-PHI. Update it with the 3668 // incoming bypass edge. 3669 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3670 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3671 } // end of the LCSSA phi scan. 3672 3673 // Fix the scalar loop reduction variable with the incoming reduction sum 3674 // from the vector body and from the backedge value. 3675 int IncomingEdgeBlockIdx = 3676 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3677 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3678 // Pick the other block. 3679 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3680 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3681 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3682 } 3683 3684 void InnerLoopVectorizer::fixLCSSAPHIs() { 3685 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3686 if (LCSSAPhi.getNumIncomingValues() == 1) { 3687 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3688 // Non-instruction incoming values will have only one value. 3689 unsigned LastLane = 0; 3690 if (isa<Instruction>(IncomingValue)) 3691 LastLane = Cost->isUniformAfterVectorization( 3692 cast<Instruction>(IncomingValue), VF) 3693 ? 0 3694 : VF - 1; 3695 // Can be a loop invariant incoming value or the last scalar value to be 3696 // extracted from the vectorized loop. 3697 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3698 Value *lastIncomingValue = 3699 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 3700 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 3701 } 3702 } 3703 } 3704 3705 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3706 // The basic block and loop containing the predicated instruction. 3707 auto *PredBB = PredInst->getParent(); 3708 auto *VectorLoop = LI->getLoopFor(PredBB); 3709 3710 // Initialize a worklist with the operands of the predicated instruction. 3711 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3712 3713 // Holds instructions that we need to analyze again. An instruction may be 3714 // reanalyzed if we don't yet know if we can sink it or not. 3715 SmallVector<Instruction *, 8> InstsToReanalyze; 3716 3717 // Returns true if a given use occurs in the predicated block. Phi nodes use 3718 // their operands in their corresponding predecessor blocks. 3719 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 3720 auto *I = cast<Instruction>(U.getUser()); 3721 BasicBlock *BB = I->getParent(); 3722 if (auto *Phi = dyn_cast<PHINode>(I)) 3723 BB = Phi->getIncomingBlock( 3724 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 3725 return BB == PredBB; 3726 }; 3727 3728 // Iteratively sink the scalarized operands of the predicated instruction 3729 // into the block we created for it. When an instruction is sunk, it's 3730 // operands are then added to the worklist. The algorithm ends after one pass 3731 // through the worklist doesn't sink a single instruction. 3732 bool Changed; 3733 do { 3734 // Add the instructions that need to be reanalyzed to the worklist, and 3735 // reset the changed indicator. 3736 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 3737 InstsToReanalyze.clear(); 3738 Changed = false; 3739 3740 while (!Worklist.empty()) { 3741 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 3742 3743 // We can't sink an instruction if it is a phi node, is already in the 3744 // predicated block, is not in the loop, or may have side effects. 3745 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 3746 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 3747 continue; 3748 3749 // It's legal to sink the instruction if all its uses occur in the 3750 // predicated block. Otherwise, there's nothing to do yet, and we may 3751 // need to reanalyze the instruction. 3752 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 3753 InstsToReanalyze.push_back(I); 3754 continue; 3755 } 3756 3757 // Move the instruction to the beginning of the predicated block, and add 3758 // it's operands to the worklist. 3759 I->moveBefore(&*PredBB->getFirstInsertionPt()); 3760 Worklist.insert(I->op_begin(), I->op_end()); 3761 3762 // The sinking may have enabled other instructions to be sunk, so we will 3763 // need to iterate. 3764 Changed = true; 3765 } 3766 } while (Changed); 3767 } 3768 3769 void InnerLoopVectorizer::fixNonInductionPHIs() { 3770 for (PHINode *OrigPhi : OrigPHIsToFix) { 3771 PHINode *NewPhi = 3772 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 3773 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 3774 3775 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 3776 predecessors(OrigPhi->getParent())); 3777 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 3778 predecessors(NewPhi->getParent())); 3779 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 3780 "Scalar and Vector BB should have the same number of predecessors"); 3781 3782 // The insertion point in Builder may be invalidated by the time we get 3783 // here. Force the Builder insertion point to something valid so that we do 3784 // not run into issues during insertion point restore in 3785 // getOrCreateVectorValue calls below. 3786 Builder.SetInsertPoint(NewPhi); 3787 3788 // The predecessor order is preserved and we can rely on mapping between 3789 // scalar and vector block predecessors. 3790 for (unsigned i = 0; i < NumIncomingValues; ++i) { 3791 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 3792 3793 // When looking up the new scalar/vector values to fix up, use incoming 3794 // values from original phi. 3795 Value *ScIncV = 3796 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 3797 3798 // Scalar incoming value may need a broadcast 3799 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 3800 NewPhi->addIncoming(NewIncV, NewPredBB); 3801 } 3802 } 3803 } 3804 3805 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 3806 unsigned VF) { 3807 PHINode *P = cast<PHINode>(PN); 3808 if (EnableVPlanNativePath) { 3809 // Currently we enter here in the VPlan-native path for non-induction 3810 // PHIs where all control flow is uniform. We simply widen these PHIs. 3811 // Create a vector phi with no operands - the vector phi operands will be 3812 // set at the end of vector code generation. 3813 Type *VecTy = 3814 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3815 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 3816 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 3817 OrigPHIsToFix.push_back(P); 3818 3819 return; 3820 } 3821 3822 assert(PN->getParent() == OrigLoop->getHeader() && 3823 "Non-header phis should have been handled elsewhere"); 3824 3825 // In order to support recurrences we need to be able to vectorize Phi nodes. 3826 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3827 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 3828 // this value when we vectorize all of the instructions that use the PHI. 3829 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 3830 for (unsigned Part = 0; Part < UF; ++Part) { 3831 // This is phase one of vectorizing PHIs. 3832 Type *VecTy = 3833 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3834 Value *EntryPart = PHINode::Create( 3835 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 3836 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 3837 } 3838 return; 3839 } 3840 3841 setDebugLocFromInst(Builder, P); 3842 3843 // This PHINode must be an induction variable. 3844 // Make sure that we know about it. 3845 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 3846 3847 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3848 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3849 3850 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3851 // which can be found from the original scalar operations. 3852 switch (II.getKind()) { 3853 case InductionDescriptor::IK_NoInduction: 3854 llvm_unreachable("Unknown induction"); 3855 case InductionDescriptor::IK_IntInduction: 3856 case InductionDescriptor::IK_FpInduction: 3857 llvm_unreachable("Integer/fp induction is handled elsewhere."); 3858 case InductionDescriptor::IK_PtrInduction: { 3859 // Handle the pointer induction variable case. 3860 assert(P->getType()->isPointerTy() && "Unexpected type."); 3861 // This is the normalized GEP that starts counting at zero. 3862 Value *PtrInd = Induction; 3863 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 3864 // Determine the number of scalars we need to generate for each unroll 3865 // iteration. If the instruction is uniform, we only need to generate the 3866 // first lane. Otherwise, we generate all VF values. 3867 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 3868 // These are the scalar results. Notice that we don't generate vector GEPs 3869 // because scalar GEPs result in better code. 3870 for (unsigned Part = 0; Part < UF; ++Part) { 3871 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 3872 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 3873 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3874 Value *SclrGep = 3875 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 3876 SclrGep->setName("next.gep"); 3877 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 3878 } 3879 } 3880 return; 3881 } 3882 } 3883 } 3884 3885 /// A helper function for checking whether an integer division-related 3886 /// instruction may divide by zero (in which case it must be predicated if 3887 /// executed conditionally in the scalar code). 3888 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 3889 /// Non-zero divisors that are non compile-time constants will not be 3890 /// converted into multiplication, so we will still end up scalarizing 3891 /// the division, but can do so w/o predication. 3892 static bool mayDivideByZero(Instruction &I) { 3893 assert((I.getOpcode() == Instruction::UDiv || 3894 I.getOpcode() == Instruction::SDiv || 3895 I.getOpcode() == Instruction::URem || 3896 I.getOpcode() == Instruction::SRem) && 3897 "Unexpected instruction"); 3898 Value *Divisor = I.getOperand(1); 3899 auto *CInt = dyn_cast<ConstantInt>(Divisor); 3900 return !CInt || CInt->isZero(); 3901 } 3902 3903 void InnerLoopVectorizer::widenInstruction(Instruction &I) { 3904 switch (I.getOpcode()) { 3905 case Instruction::Br: 3906 case Instruction::PHI: 3907 llvm_unreachable("This instruction is handled by a different recipe."); 3908 case Instruction::GetElementPtr: { 3909 // Construct a vector GEP by widening the operands of the scalar GEP as 3910 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 3911 // results in a vector of pointers when at least one operand of the GEP 3912 // is vector-typed. Thus, to keep the representation compact, we only use 3913 // vector-typed operands for loop-varying values. 3914 auto *GEP = cast<GetElementPtrInst>(&I); 3915 3916 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 3917 // If we are vectorizing, but the GEP has only loop-invariant operands, 3918 // the GEP we build (by only using vector-typed operands for 3919 // loop-varying values) would be a scalar pointer. Thus, to ensure we 3920 // produce a vector of pointers, we need to either arbitrarily pick an 3921 // operand to broadcast, or broadcast a clone of the original GEP. 3922 // Here, we broadcast a clone of the original. 3923 // 3924 // TODO: If at some point we decide to scalarize instructions having 3925 // loop-invariant operands, this special case will no longer be 3926 // required. We would add the scalarization decision to 3927 // collectLoopScalars() and teach getVectorValue() to broadcast 3928 // the lane-zero scalar value. 3929 auto *Clone = Builder.Insert(GEP->clone()); 3930 for (unsigned Part = 0; Part < UF; ++Part) { 3931 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 3932 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 3933 addMetadata(EntryPart, GEP); 3934 } 3935 } else { 3936 // If the GEP has at least one loop-varying operand, we are sure to 3937 // produce a vector of pointers. But if we are only unrolling, we want 3938 // to produce a scalar GEP for each unroll part. Thus, the GEP we 3939 // produce with the code below will be scalar (if VF == 1) or vector 3940 // (otherwise). Note that for the unroll-only case, we still maintain 3941 // values in the vector mapping with initVector, as we do for other 3942 // instructions. 3943 for (unsigned Part = 0; Part < UF; ++Part) { 3944 // The pointer operand of the new GEP. If it's loop-invariant, we 3945 // won't broadcast it. 3946 auto *Ptr = 3947 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 3948 ? GEP->getPointerOperand() 3949 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 3950 3951 // Collect all the indices for the new GEP. If any index is 3952 // loop-invariant, we won't broadcast it. 3953 SmallVector<Value *, 4> Indices; 3954 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 3955 if (OrigLoop->isLoopInvariant(U.get())) 3956 Indices.push_back(U.get()); 3957 else 3958 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 3959 } 3960 3961 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 3962 // but it should be a vector, otherwise. 3963 auto *NewGEP = 3964 GEP->isInBounds() 3965 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 3966 Indices) 3967 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 3968 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 3969 "NewGEP is not a pointer vector"); 3970 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 3971 addMetadata(NewGEP, GEP); 3972 } 3973 } 3974 3975 break; 3976 } 3977 case Instruction::UDiv: 3978 case Instruction::SDiv: 3979 case Instruction::SRem: 3980 case Instruction::URem: 3981 case Instruction::Add: 3982 case Instruction::FAdd: 3983 case Instruction::Sub: 3984 case Instruction::FSub: 3985 case Instruction::Mul: 3986 case Instruction::FMul: 3987 case Instruction::FDiv: 3988 case Instruction::FRem: 3989 case Instruction::Shl: 3990 case Instruction::LShr: 3991 case Instruction::AShr: 3992 case Instruction::And: 3993 case Instruction::Or: 3994 case Instruction::Xor: { 3995 // Just widen binops. 3996 auto *BinOp = cast<BinaryOperator>(&I); 3997 setDebugLocFromInst(Builder, BinOp); 3998 3999 for (unsigned Part = 0; Part < UF; ++Part) { 4000 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 4001 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 4002 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 4003 4004 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4005 VecOp->copyIRFlags(BinOp); 4006 4007 // Use this vector value for all users of the original instruction. 4008 VectorLoopValueMap.setVectorValue(&I, Part, V); 4009 addMetadata(V, BinOp); 4010 } 4011 4012 break; 4013 } 4014 case Instruction::Select: { 4015 // Widen selects. 4016 // If the selector is loop invariant we can create a select 4017 // instruction with a scalar condition. Otherwise, use vector-select. 4018 auto *SE = PSE.getSE(); 4019 bool InvariantCond = 4020 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4021 setDebugLocFromInst(Builder, &I); 4022 4023 // The condition can be loop invariant but still defined inside the 4024 // loop. This means that we can't just use the original 'cond' value. 4025 // We have to take the 'vectorized' value and pick the first lane. 4026 // Instcombine will make this a no-op. 4027 4028 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 4029 4030 for (unsigned Part = 0; Part < UF; ++Part) { 4031 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4032 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4033 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4034 Value *Sel = 4035 Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1); 4036 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4037 addMetadata(Sel, &I); 4038 } 4039 4040 break; 4041 } 4042 4043 case Instruction::ICmp: 4044 case Instruction::FCmp: { 4045 // Widen compares. Generate vector compares. 4046 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4047 auto *Cmp = dyn_cast<CmpInst>(&I); 4048 setDebugLocFromInst(Builder, Cmp); 4049 for (unsigned Part = 0; Part < UF; ++Part) { 4050 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4051 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4052 Value *C = nullptr; 4053 if (FCmp) { 4054 // Propagate fast math flags. 4055 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4056 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4057 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4058 } else { 4059 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4060 } 4061 VectorLoopValueMap.setVectorValue(&I, Part, C); 4062 addMetadata(C, &I); 4063 } 4064 4065 break; 4066 } 4067 4068 case Instruction::ZExt: 4069 case Instruction::SExt: 4070 case Instruction::FPToUI: 4071 case Instruction::FPToSI: 4072 case Instruction::FPExt: 4073 case Instruction::PtrToInt: 4074 case Instruction::IntToPtr: 4075 case Instruction::SIToFP: 4076 case Instruction::UIToFP: 4077 case Instruction::Trunc: 4078 case Instruction::FPTrunc: 4079 case Instruction::BitCast: { 4080 auto *CI = dyn_cast<CastInst>(&I); 4081 setDebugLocFromInst(Builder, CI); 4082 4083 /// Vectorize casts. 4084 Type *DestTy = 4085 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4086 4087 for (unsigned Part = 0; Part < UF; ++Part) { 4088 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4089 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4090 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4091 addMetadata(Cast, &I); 4092 } 4093 break; 4094 } 4095 4096 case Instruction::Call: { 4097 // Ignore dbg intrinsics. 4098 if (isa<DbgInfoIntrinsic>(I)) 4099 break; 4100 setDebugLocFromInst(Builder, &I); 4101 4102 Module *M = I.getParent()->getParent()->getParent(); 4103 auto *CI = cast<CallInst>(&I); 4104 4105 StringRef FnName = CI->getCalledFunction()->getName(); 4106 Function *F = CI->getCalledFunction(); 4107 Type *RetTy = ToVectorTy(CI->getType(), VF); 4108 SmallVector<Type *, 4> Tys; 4109 for (Value *ArgOperand : CI->arg_operands()) 4110 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4111 4112 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4113 4114 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4115 // version of the instruction. 4116 // Is it beneficial to perform intrinsic call compared to lib call? 4117 bool NeedToScalarize; 4118 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4119 bool UseVectorIntrinsic = 4120 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4121 assert((UseVectorIntrinsic || !NeedToScalarize) && 4122 "Instruction should be scalarized elsewhere."); 4123 4124 for (unsigned Part = 0; Part < UF; ++Part) { 4125 SmallVector<Value *, 4> Args; 4126 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4127 Value *Arg = CI->getArgOperand(i); 4128 // Some intrinsics have a scalar argument - don't replace it with a 4129 // vector. 4130 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4131 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4132 Args.push_back(Arg); 4133 } 4134 4135 Function *VectorF; 4136 if (UseVectorIntrinsic) { 4137 // Use vector version of the intrinsic. 4138 Type *TysForDecl[] = {CI->getType()}; 4139 if (VF > 1) 4140 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4141 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4142 } else { 4143 // Use vector version of the library call. 4144 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4145 assert(!VFnName.empty() && "Vector function name is empty."); 4146 VectorF = M->getFunction(VFnName); 4147 if (!VectorF) { 4148 // Generate a declaration 4149 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4150 VectorF = 4151 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4152 VectorF->copyAttributesFrom(F); 4153 } 4154 } 4155 assert(VectorF && "Can't create vector function."); 4156 4157 SmallVector<OperandBundleDef, 1> OpBundles; 4158 CI->getOperandBundlesAsDefs(OpBundles); 4159 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4160 4161 if (isa<FPMathOperator>(V)) 4162 V->copyFastMathFlags(CI); 4163 4164 VectorLoopValueMap.setVectorValue(&I, Part, V); 4165 addMetadata(V, &I); 4166 } 4167 4168 break; 4169 } 4170 4171 default: 4172 // This instruction is not vectorized by simple widening. 4173 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4174 llvm_unreachable("Unhandled instruction!"); 4175 } // end of switch. 4176 } 4177 4178 void InnerLoopVectorizer::updateAnalysis() { 4179 // Forget the original basic block. 4180 PSE.getSE()->forgetLoop(OrigLoop); 4181 4182 // DT is not kept up-to-date for outer loop vectorization 4183 if (EnableVPlanNativePath) 4184 return; 4185 4186 // Update the dominator tree information. 4187 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4188 "Entry does not dominate exit."); 4189 4190 DT->addNewBlock(LoopMiddleBlock, 4191 LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4192 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4193 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4194 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4195 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 4196 } 4197 4198 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4199 // We should not collect Scalars more than once per VF. Right now, this 4200 // function is called from collectUniformsAndScalars(), which already does 4201 // this check. Collecting Scalars for VF=1 does not make any sense. 4202 assert(VF >= 2 && Scalars.find(VF) == Scalars.end() && 4203 "This function should not be visited twice for the same VF"); 4204 4205 SmallSetVector<Instruction *, 8> Worklist; 4206 4207 // These sets are used to seed the analysis with pointers used by memory 4208 // accesses that will remain scalar. 4209 SmallSetVector<Instruction *, 8> ScalarPtrs; 4210 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4211 4212 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4213 // The pointer operands of loads and stores will be scalar as long as the 4214 // memory access is not a gather or scatter operation. The value operand of a 4215 // store will remain scalar if the store is scalarized. 4216 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4217 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4218 assert(WideningDecision != CM_Unknown && 4219 "Widening decision should be ready at this moment"); 4220 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4221 if (Ptr == Store->getValueOperand()) 4222 return WideningDecision == CM_Scalarize; 4223 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4224 "Ptr is neither a value or pointer operand"); 4225 return WideningDecision != CM_GatherScatter; 4226 }; 4227 4228 // A helper that returns true if the given value is a bitcast or 4229 // getelementptr instruction contained in the loop. 4230 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4231 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4232 isa<GetElementPtrInst>(V)) && 4233 !TheLoop->isLoopInvariant(V); 4234 }; 4235 4236 // A helper that evaluates a memory access's use of a pointer. If the use 4237 // will be a scalar use, and the pointer is only used by memory accesses, we 4238 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4239 // PossibleNonScalarPtrs. 4240 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4241 // We only care about bitcast and getelementptr instructions contained in 4242 // the loop. 4243 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4244 return; 4245 4246 // If the pointer has already been identified as scalar (e.g., if it was 4247 // also identified as uniform), there's nothing to do. 4248 auto *I = cast<Instruction>(Ptr); 4249 if (Worklist.count(I)) 4250 return; 4251 4252 // If the use of the pointer will be a scalar use, and all users of the 4253 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4254 // place the pointer in PossibleNonScalarPtrs. 4255 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4256 return isa<LoadInst>(U) || isa<StoreInst>(U); 4257 })) 4258 ScalarPtrs.insert(I); 4259 else 4260 PossibleNonScalarPtrs.insert(I); 4261 }; 4262 4263 // We seed the scalars analysis with three classes of instructions: (1) 4264 // instructions marked uniform-after-vectorization, (2) bitcast and 4265 // getelementptr instructions used by memory accesses requiring a scalar use, 4266 // and (3) pointer induction variables and their update instructions (we 4267 // currently only scalarize these). 4268 // 4269 // (1) Add to the worklist all instructions that have been identified as 4270 // uniform-after-vectorization. 4271 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4272 4273 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4274 // memory accesses requiring a scalar use. The pointer operands of loads and 4275 // stores will be scalar as long as the memory accesses is not a gather or 4276 // scatter operation. The value operand of a store will remain scalar if the 4277 // store is scalarized. 4278 for (auto *BB : TheLoop->blocks()) 4279 for (auto &I : *BB) { 4280 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4281 evaluatePtrUse(Load, Load->getPointerOperand()); 4282 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4283 evaluatePtrUse(Store, Store->getPointerOperand()); 4284 evaluatePtrUse(Store, Store->getValueOperand()); 4285 } 4286 } 4287 for (auto *I : ScalarPtrs) 4288 if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) { 4289 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4290 Worklist.insert(I); 4291 } 4292 4293 // (3) Add to the worklist all pointer induction variables and their update 4294 // instructions. 4295 // 4296 // TODO: Once we are able to vectorize pointer induction variables we should 4297 // no longer insert them into the worklist here. 4298 auto *Latch = TheLoop->getLoopLatch(); 4299 for (auto &Induction : *Legal->getInductionVars()) { 4300 auto *Ind = Induction.first; 4301 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4302 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4303 continue; 4304 Worklist.insert(Ind); 4305 Worklist.insert(IndUpdate); 4306 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4307 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4308 << "\n"); 4309 } 4310 4311 // Insert the forced scalars. 4312 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4313 // induction variable when the PHI user is scalarized. 4314 auto ForcedScalar = ForcedScalars.find(VF); 4315 if (ForcedScalar != ForcedScalars.end()) 4316 for (auto *I : ForcedScalar->second) 4317 Worklist.insert(I); 4318 4319 // Expand the worklist by looking through any bitcasts and getelementptr 4320 // instructions we've already identified as scalar. This is similar to the 4321 // expansion step in collectLoopUniforms(); however, here we're only 4322 // expanding to include additional bitcasts and getelementptr instructions. 4323 unsigned Idx = 0; 4324 while (Idx != Worklist.size()) { 4325 Instruction *Dst = Worklist[Idx++]; 4326 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4327 continue; 4328 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4329 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4330 auto *J = cast<Instruction>(U); 4331 return !TheLoop->contains(J) || Worklist.count(J) || 4332 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4333 isScalarUse(J, Src)); 4334 })) { 4335 Worklist.insert(Src); 4336 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4337 } 4338 } 4339 4340 // An induction variable will remain scalar if all users of the induction 4341 // variable and induction variable update remain scalar. 4342 for (auto &Induction : *Legal->getInductionVars()) { 4343 auto *Ind = Induction.first; 4344 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4345 4346 // We already considered pointer induction variables, so there's no reason 4347 // to look at their users again. 4348 // 4349 // TODO: Once we are able to vectorize pointer induction variables we 4350 // should no longer skip over them here. 4351 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4352 continue; 4353 4354 // Determine if all users of the induction variable are scalar after 4355 // vectorization. 4356 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4357 auto *I = cast<Instruction>(U); 4358 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4359 }); 4360 if (!ScalarInd) 4361 continue; 4362 4363 // Determine if all users of the induction variable update instruction are 4364 // scalar after vectorization. 4365 auto ScalarIndUpdate = 4366 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4367 auto *I = cast<Instruction>(U); 4368 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4369 }); 4370 if (!ScalarIndUpdate) 4371 continue; 4372 4373 // The induction variable and its update instruction will remain scalar. 4374 Worklist.insert(Ind); 4375 Worklist.insert(IndUpdate); 4376 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4377 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4378 << "\n"); 4379 } 4380 4381 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4382 } 4383 4384 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4385 if (!blockNeedsPredication(I->getParent())) 4386 return false; 4387 switch(I->getOpcode()) { 4388 default: 4389 break; 4390 case Instruction::Load: 4391 case Instruction::Store: { 4392 if (!Legal->isMaskRequired(I)) 4393 return false; 4394 auto *Ptr = getLoadStorePointerOperand(I); 4395 auto *Ty = getMemInstValueType(I); 4396 // We have already decided how to vectorize this instruction, get that 4397 // result. 4398 if (VF > 1) { 4399 InstWidening WideningDecision = getWideningDecision(I, VF); 4400 assert(WideningDecision != CM_Unknown && 4401 "Widening decision should be ready at this moment"); 4402 return WideningDecision == CM_Scalarize; 4403 } 4404 return isa<LoadInst>(I) ? 4405 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 4406 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 4407 } 4408 case Instruction::UDiv: 4409 case Instruction::SDiv: 4410 case Instruction::SRem: 4411 case Instruction::URem: 4412 return mayDivideByZero(*I); 4413 } 4414 return false; 4415 } 4416 4417 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4418 unsigned VF) { 4419 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4420 assert(getWideningDecision(I, VF) == CM_Unknown && 4421 "Decision should not be set yet."); 4422 auto *Group = getInterleavedAccessGroup(I); 4423 assert(Group && "Must have a group."); 4424 4425 // Check if masking is required. 4426 // A Group may need masking for one of two reasons: it resides in a block that 4427 // needs predication, or it was decided to use masking to deal with gaps. 4428 bool PredicatedAccessRequiresMasking = 4429 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4430 bool AccessWithGapsRequiresMasking = 4431 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 4432 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4433 return true; 4434 4435 // If masked interleaving is required, we expect that the user/target had 4436 // enabled it, because otherwise it either wouldn't have been created or 4437 // it should have been invalidated by the CostModel. 4438 assert(useMaskedInterleavedAccesses(TTI) && 4439 "Masked interleave-groups for predicated accesses are not enabled."); 4440 4441 auto *Ty = getMemInstValueType(I); 4442 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty) 4443 : TTI.isLegalMaskedStore(Ty); 4444 } 4445 4446 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4447 unsigned VF) { 4448 // Get and ensure we have a valid memory instruction. 4449 LoadInst *LI = dyn_cast<LoadInst>(I); 4450 StoreInst *SI = dyn_cast<StoreInst>(I); 4451 assert((LI || SI) && "Invalid memory instruction"); 4452 4453 auto *Ptr = getLoadStorePointerOperand(I); 4454 4455 // In order to be widened, the pointer should be consecutive, first of all. 4456 if (!Legal->isConsecutivePtr(Ptr)) 4457 return false; 4458 4459 // If the instruction is a store located in a predicated block, it will be 4460 // scalarized. 4461 if (isScalarWithPredication(I)) 4462 return false; 4463 4464 // If the instruction's allocated size doesn't equal it's type size, it 4465 // requires padding and will be scalarized. 4466 auto &DL = I->getModule()->getDataLayout(); 4467 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4468 if (hasIrregularType(ScalarTy, DL, VF)) 4469 return false; 4470 4471 return true; 4472 } 4473 4474 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4475 // We should not collect Uniforms more than once per VF. Right now, 4476 // this function is called from collectUniformsAndScalars(), which 4477 // already does this check. Collecting Uniforms for VF=1 does not make any 4478 // sense. 4479 4480 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4481 "This function should not be visited twice for the same VF"); 4482 4483 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4484 // not analyze again. Uniforms.count(VF) will return 1. 4485 Uniforms[VF].clear(); 4486 4487 // We now know that the loop is vectorizable! 4488 // Collect instructions inside the loop that will remain uniform after 4489 // vectorization. 4490 4491 // Global values, params and instructions outside of current loop are out of 4492 // scope. 4493 auto isOutOfScope = [&](Value *V) -> bool { 4494 Instruction *I = dyn_cast<Instruction>(V); 4495 return (!I || !TheLoop->contains(I)); 4496 }; 4497 4498 SetVector<Instruction *> Worklist; 4499 BasicBlock *Latch = TheLoop->getLoopLatch(); 4500 4501 // Start with the conditional branch. If the branch condition is an 4502 // instruction contained in the loop that is only used by the branch, it is 4503 // uniform. 4504 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4505 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 4506 Worklist.insert(Cmp); 4507 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 4508 } 4509 4510 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4511 // are pointers that are treated like consecutive pointers during 4512 // vectorization. The pointer operands of interleaved accesses are an 4513 // example. 4514 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4515 4516 // Holds pointer operands of instructions that are possibly non-uniform. 4517 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4518 4519 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4520 InstWidening WideningDecision = getWideningDecision(I, VF); 4521 assert(WideningDecision != CM_Unknown && 4522 "Widening decision should be ready at this moment"); 4523 4524 return (WideningDecision == CM_Widen || 4525 WideningDecision == CM_Widen_Reverse || 4526 WideningDecision == CM_Interleave); 4527 }; 4528 // Iterate over the instructions in the loop, and collect all 4529 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4530 // that a consecutive-like pointer operand will be scalarized, we collect it 4531 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4532 // getelementptr instruction can be used by both vectorized and scalarized 4533 // memory instructions. For example, if a loop loads and stores from the same 4534 // location, but the store is conditional, the store will be scalarized, and 4535 // the getelementptr won't remain uniform. 4536 for (auto *BB : TheLoop->blocks()) 4537 for (auto &I : *BB) { 4538 // If there's no pointer operand, there's nothing to do. 4539 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4540 if (!Ptr) 4541 continue; 4542 4543 // True if all users of Ptr are memory accesses that have Ptr as their 4544 // pointer operand. 4545 auto UsersAreMemAccesses = 4546 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4547 return getLoadStorePointerOperand(U) == Ptr; 4548 }); 4549 4550 // Ensure the memory instruction will not be scalarized or used by 4551 // gather/scatter, making its pointer operand non-uniform. If the pointer 4552 // operand is used by any instruction other than a memory access, we 4553 // conservatively assume the pointer operand may be non-uniform. 4554 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4555 PossibleNonUniformPtrs.insert(Ptr); 4556 4557 // If the memory instruction will be vectorized and its pointer operand 4558 // is consecutive-like, or interleaving - the pointer operand should 4559 // remain uniform. 4560 else 4561 ConsecutiveLikePtrs.insert(Ptr); 4562 } 4563 4564 // Add to the Worklist all consecutive and consecutive-like pointers that 4565 // aren't also identified as possibly non-uniform. 4566 for (auto *V : ConsecutiveLikePtrs) 4567 if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) { 4568 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 4569 Worklist.insert(V); 4570 } 4571 4572 // Expand Worklist in topological order: whenever a new instruction 4573 // is added , its users should be already inside Worklist. It ensures 4574 // a uniform instruction will only be used by uniform instructions. 4575 unsigned idx = 0; 4576 while (idx != Worklist.size()) { 4577 Instruction *I = Worklist[idx++]; 4578 4579 for (auto OV : I->operand_values()) { 4580 // isOutOfScope operands cannot be uniform instructions. 4581 if (isOutOfScope(OV)) 4582 continue; 4583 // First order recurrence Phi's should typically be considered 4584 // non-uniform. 4585 auto *OP = dyn_cast<PHINode>(OV); 4586 if (OP && Legal->isFirstOrderRecurrence(OP)) 4587 continue; 4588 // If all the users of the operand are uniform, then add the 4589 // operand into the uniform worklist. 4590 auto *OI = cast<Instruction>(OV); 4591 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4592 auto *J = cast<Instruction>(U); 4593 return Worklist.count(J) || 4594 (OI == getLoadStorePointerOperand(J) && 4595 isUniformDecision(J, VF)); 4596 })) { 4597 Worklist.insert(OI); 4598 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4599 } 4600 } 4601 } 4602 4603 // Returns true if Ptr is the pointer operand of a memory access instruction 4604 // I, and I is known to not require scalarization. 4605 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4606 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4607 }; 4608 4609 // For an instruction to be added into Worklist above, all its users inside 4610 // the loop should also be in Worklist. However, this condition cannot be 4611 // true for phi nodes that form a cyclic dependence. We must process phi 4612 // nodes separately. An induction variable will remain uniform if all users 4613 // of the induction variable and induction variable update remain uniform. 4614 // The code below handles both pointer and non-pointer induction variables. 4615 for (auto &Induction : *Legal->getInductionVars()) { 4616 auto *Ind = Induction.first; 4617 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4618 4619 // Determine if all users of the induction variable are uniform after 4620 // vectorization. 4621 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4622 auto *I = cast<Instruction>(U); 4623 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4624 isVectorizedMemAccessUse(I, Ind); 4625 }); 4626 if (!UniformInd) 4627 continue; 4628 4629 // Determine if all users of the induction variable update instruction are 4630 // uniform after vectorization. 4631 auto UniformIndUpdate = 4632 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4633 auto *I = cast<Instruction>(U); 4634 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4635 isVectorizedMemAccessUse(I, IndUpdate); 4636 }); 4637 if (!UniformIndUpdate) 4638 continue; 4639 4640 // The induction variable and its update instruction will remain uniform. 4641 Worklist.insert(Ind); 4642 Worklist.insert(IndUpdate); 4643 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 4644 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate 4645 << "\n"); 4646 } 4647 4648 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4649 } 4650 4651 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { 4652 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4653 // TODO: It may by useful to do since it's still likely to be dynamically 4654 // uniform if the target can skip. 4655 LLVM_DEBUG( 4656 dbgs() << "LV: Not inserting runtime ptr check for divergent target"); 4657 4658 ORE->emit( 4659 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 4660 << "runtime pointer checks needed. Not enabled for divergent target"); 4661 4662 return None; 4663 } 4664 4665 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4666 if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize. 4667 return computeFeasibleMaxVF(OptForSize, TC); 4668 4669 if (Legal->getRuntimePointerChecking()->Need) { 4670 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4671 << "runtime pointer checks needed. Enable vectorization of this " 4672 "loop with '#pragma clang loop vectorize(enable)' when " 4673 "compiling with -Os/-Oz"); 4674 LLVM_DEBUG( 4675 dbgs() 4676 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 4677 return None; 4678 } 4679 4680 if (!PSE.getUnionPredicate().getPredicates().empty()) { 4681 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4682 << "runtime SCEV checks needed. Enable vectorization of this " 4683 "loop with '#pragma clang loop vectorize(enable)' when " 4684 "compiling with -Os/-Oz"); 4685 LLVM_DEBUG( 4686 dbgs() 4687 << "LV: Aborting. Runtime SCEV check is required with -Os/-Oz.\n"); 4688 return None; 4689 } 4690 4691 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4692 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4693 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4694 << "runtime stride == 1 checks needed. Enable vectorization of " 4695 "this loop with '#pragma clang loop vectorize(enable)' when " 4696 "compiling with -Os/-Oz"); 4697 LLVM_DEBUG( 4698 dbgs() 4699 << "LV: Aborting. Runtime stride check is required with -Os/-Oz.\n"); 4700 return None; 4701 } 4702 4703 // If we optimize the program for size, avoid creating the tail loop. 4704 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4705 4706 if (TC == 1) { 4707 ORE->emit(createMissedAnalysis("SingleIterationLoop") 4708 << "loop trip count is one, irrelevant for vectorization"); 4709 LLVM_DEBUG(dbgs() << "LV: Aborting, single iteration (non) loop.\n"); 4710 return None; 4711 } 4712 4713 // Record that scalar epilogue is not allowed. 4714 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 4715 4716 IsScalarEpilogueAllowed = !OptForSize; 4717 4718 // We don't create an epilogue when optimizing for size. 4719 // Invalidate interleave groups that require an epilogue if we can't mask 4720 // the interleave-group. 4721 if (!useMaskedInterleavedAccesses(TTI)) 4722 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 4723 4724 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 4725 4726 if (TC > 0 && TC % MaxVF == 0) { 4727 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 4728 return MaxVF; 4729 } 4730 4731 // If we don't know the precise trip count, or if the trip count that we 4732 // found modulo the vectorization factor is not zero, try to fold the tail 4733 // by masking. 4734 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 4735 if (Legal->canFoldTailByMasking()) { 4736 FoldTailByMasking = true; 4737 return MaxVF; 4738 } 4739 4740 if (TC == 0) { 4741 ORE->emit( 4742 createMissedAnalysis("UnknownLoopCountComplexCFG") 4743 << "unable to calculate the loop count due to complex control flow"); 4744 return None; 4745 } 4746 4747 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 4748 << "cannot optimize for size and vectorize at the same time. " 4749 "Enable vectorization of this loop with '#pragma clang loop " 4750 "vectorize(enable)' when compiling with -Os/-Oz"); 4751 return None; 4752 } 4753 4754 unsigned 4755 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 4756 unsigned ConstTripCount) { 4757 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4758 unsigned SmallestType, WidestType; 4759 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4760 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 4761 4762 // Get the maximum safe dependence distance in bits computed by LAA. 4763 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4764 // the memory accesses that is most restrictive (involved in the smallest 4765 // dependence distance). 4766 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 4767 4768 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 4769 4770 unsigned MaxVectorSize = WidestRegister / WidestType; 4771 4772 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4773 << " / " << WidestType << " bits.\n"); 4774 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 4775 << WidestRegister << " bits.\n"); 4776 4777 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 4778 " into one vector!"); 4779 if (MaxVectorSize == 0) { 4780 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 4781 MaxVectorSize = 1; 4782 return MaxVectorSize; 4783 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 4784 isPowerOf2_32(ConstTripCount)) { 4785 // We need to clamp the VF to be the ConstTripCount. There is no point in 4786 // choosing a higher viable VF as done in the loop below. 4787 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 4788 << ConstTripCount << "\n"); 4789 MaxVectorSize = ConstTripCount; 4790 return MaxVectorSize; 4791 } 4792 4793 unsigned MaxVF = MaxVectorSize; 4794 if (TTI.shouldMaximizeVectorBandwidth(OptForSize) || 4795 (MaximizeBandwidth && !OptForSize)) { 4796 // Collect all viable vectorization factors larger than the default MaxVF 4797 // (i.e. MaxVectorSize). 4798 SmallVector<unsigned, 8> VFs; 4799 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 4800 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 4801 VFs.push_back(VS); 4802 4803 // For each VF calculate its register usage. 4804 auto RUs = calculateRegisterUsage(VFs); 4805 4806 // Select the largest VF which doesn't require more registers than existing 4807 // ones. 4808 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 4809 for (int i = RUs.size() - 1; i >= 0; --i) { 4810 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 4811 MaxVF = VFs[i]; 4812 break; 4813 } 4814 } 4815 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 4816 if (MaxVF < MinVF) { 4817 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 4818 << ") with target's minimum: " << MinVF << '\n'); 4819 MaxVF = MinVF; 4820 } 4821 } 4822 } 4823 return MaxVF; 4824 } 4825 4826 VectorizationFactor 4827 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 4828 float Cost = expectedCost(1).first; 4829 const float ScalarCost = Cost; 4830 unsigned Width = 1; 4831 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 4832 4833 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 4834 if (ForceVectorization && MaxVF > 1) { 4835 // Ignore scalar width, because the user explicitly wants vectorization. 4836 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 4837 // evaluation. 4838 Cost = std::numeric_limits<float>::max(); 4839 } 4840 4841 for (unsigned i = 2; i <= MaxVF; i *= 2) { 4842 // Notice that the vector loop needs to be executed less times, so 4843 // we need to divide the cost of the vector loops by the width of 4844 // the vector elements. 4845 VectorizationCostTy C = expectedCost(i); 4846 float VectorCost = C.first / (float)i; 4847 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 4848 << " costs: " << (int)VectorCost << ".\n"); 4849 if (!C.second && !ForceVectorization) { 4850 LLVM_DEBUG( 4851 dbgs() << "LV: Not considering vector loop of width " << i 4852 << " because it will not generate any vector instructions.\n"); 4853 continue; 4854 } 4855 if (VectorCost < Cost) { 4856 Cost = VectorCost; 4857 Width = i; 4858 } 4859 } 4860 4861 if (!EnableCondStoresVectorization && NumPredStores) { 4862 ORE->emit(createMissedAnalysis("ConditionalStore") 4863 << "store that is conditionally executed prevents vectorization"); 4864 LLVM_DEBUG( 4865 dbgs() << "LV: No vectorization. There are conditional stores.\n"); 4866 Width = 1; 4867 Cost = ScalarCost; 4868 } 4869 4870 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 4871 << "LV: Vectorization seems to be not beneficial, " 4872 << "but was forced by a user.\n"); 4873 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 4874 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 4875 return Factor; 4876 } 4877 4878 std::pair<unsigned, unsigned> 4879 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 4880 unsigned MinWidth = -1U; 4881 unsigned MaxWidth = 8; 4882 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 4883 4884 // For each block. 4885 for (BasicBlock *BB : TheLoop->blocks()) { 4886 // For each instruction in the loop. 4887 for (Instruction &I : BB->instructionsWithoutDebug()) { 4888 Type *T = I.getType(); 4889 4890 // Skip ignored values. 4891 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end()) 4892 continue; 4893 4894 // Only examine Loads, Stores and PHINodes. 4895 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 4896 continue; 4897 4898 // Examine PHI nodes that are reduction variables. Update the type to 4899 // account for the recurrence type. 4900 if (auto *PN = dyn_cast<PHINode>(&I)) { 4901 if (!Legal->isReductionVariable(PN)) 4902 continue; 4903 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 4904 T = RdxDesc.getRecurrenceType(); 4905 } 4906 4907 // Examine the stored values. 4908 if (auto *ST = dyn_cast<StoreInst>(&I)) 4909 T = ST->getValueOperand()->getType(); 4910 4911 // Ignore loaded pointer types and stored pointer types that are not 4912 // vectorizable. 4913 // 4914 // FIXME: The check here attempts to predict whether a load or store will 4915 // be vectorized. We only know this for certain after a VF has 4916 // been selected. Here, we assume that if an access can be 4917 // vectorized, it will be. We should also look at extending this 4918 // optimization to non-pointer types. 4919 // 4920 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 4921 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 4922 continue; 4923 4924 MinWidth = std::min(MinWidth, 4925 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4926 MaxWidth = std::max(MaxWidth, 4927 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4928 } 4929 } 4930 4931 return {MinWidth, MaxWidth}; 4932 } 4933 4934 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 4935 unsigned VF, 4936 unsigned LoopCost) { 4937 // -- The interleave heuristics -- 4938 // We interleave the loop in order to expose ILP and reduce the loop overhead. 4939 // There are many micro-architectural considerations that we can't predict 4940 // at this level. For example, frontend pressure (on decode or fetch) due to 4941 // code size, or the number and capabilities of the execution ports. 4942 // 4943 // We use the following heuristics to select the interleave count: 4944 // 1. If the code has reductions, then we interleave to break the cross 4945 // iteration dependency. 4946 // 2. If the loop is really small, then we interleave to reduce the loop 4947 // overhead. 4948 // 3. We don't interleave if we think that we will spill registers to memory 4949 // due to the increased register pressure. 4950 4951 // When we optimize for size, we don't interleave. 4952 if (OptForSize) 4953 return 1; 4954 4955 // We used the distance for the interleave count. 4956 if (Legal->getMaxSafeDepDistBytes() != -1U) 4957 return 1; 4958 4959 // Do not interleave loops with a relatively small trip count. 4960 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4961 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 4962 return 1; 4963 4964 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 4965 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 4966 << " registers\n"); 4967 4968 if (VF == 1) { 4969 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 4970 TargetNumRegisters = ForceTargetNumScalarRegs; 4971 } else { 4972 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 4973 TargetNumRegisters = ForceTargetNumVectorRegs; 4974 } 4975 4976 RegisterUsage R = calculateRegisterUsage({VF})[0]; 4977 // We divide by these constants so assume that we have at least one 4978 // instruction that uses at least one register. 4979 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 4980 4981 // We calculate the interleave count using the following formula. 4982 // Subtract the number of loop invariants from the number of available 4983 // registers. These registers are used by all of the interleaved instances. 4984 // Next, divide the remaining registers by the number of registers that is 4985 // required by the loop, in order to estimate how many parallel instances 4986 // fit without causing spills. All of this is rounded down if necessary to be 4987 // a power of two. We want power of two interleave count to simplify any 4988 // addressing operations or alignment considerations. 4989 // We also want power of two interleave counts to ensure that the induction 4990 // variable of the vector loop wraps to zero, when tail is folded by masking; 4991 // this currently happens when OptForSize, in which case IC is set to 1 above. 4992 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 4993 R.MaxLocalUsers); 4994 4995 // Don't count the induction variable as interleaved. 4996 if (EnableIndVarRegisterHeur) 4997 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 4998 std::max(1U, (R.MaxLocalUsers - 1))); 4999 5000 // Clamp the interleave ranges to reasonable counts. 5001 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5002 5003 // Check if the user has overridden the max. 5004 if (VF == 1) { 5005 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5006 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5007 } else { 5008 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5009 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5010 } 5011 5012 // If we did not calculate the cost for VF (because the user selected the VF) 5013 // then we calculate the cost of VF here. 5014 if (LoopCost == 0) 5015 LoopCost = expectedCost(VF).first; 5016 5017 // Clamp the calculated IC to be between the 1 and the max interleave count 5018 // that the target allows. 5019 if (IC > MaxInterleaveCount) 5020 IC = MaxInterleaveCount; 5021 else if (IC < 1) 5022 IC = 1; 5023 5024 // Interleave if we vectorized this loop and there is a reduction that could 5025 // benefit from interleaving. 5026 if (VF > 1 && !Legal->getReductionVars()->empty()) { 5027 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5028 return IC; 5029 } 5030 5031 // Note that if we've already vectorized the loop we will have done the 5032 // runtime check and so interleaving won't require further checks. 5033 bool InterleavingRequiresRuntimePointerCheck = 5034 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5035 5036 // We want to interleave small loops in order to reduce the loop overhead and 5037 // potentially expose ILP opportunities. 5038 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5039 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5040 // We assume that the cost overhead is 1 and we use the cost model 5041 // to estimate the cost of the loop and interleave until the cost of the 5042 // loop overhead is about 5% of the cost of the loop. 5043 unsigned SmallIC = 5044 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5045 5046 // Interleave until store/load ports (estimated by max interleave count) are 5047 // saturated. 5048 unsigned NumStores = Legal->getNumStores(); 5049 unsigned NumLoads = Legal->getNumLoads(); 5050 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5051 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5052 5053 // If we have a scalar reduction (vector reductions are already dealt with 5054 // by this point), we can increase the critical path length if the loop 5055 // we're interleaving is inside another loop. Limit, by default to 2, so the 5056 // critical path only gets increased by one reduction operation. 5057 if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) { 5058 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5059 SmallIC = std::min(SmallIC, F); 5060 StoresIC = std::min(StoresIC, F); 5061 LoadsIC = std::min(LoadsIC, F); 5062 } 5063 5064 if (EnableLoadStoreRuntimeInterleave && 5065 std::max(StoresIC, LoadsIC) > SmallIC) { 5066 LLVM_DEBUG( 5067 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5068 return std::max(StoresIC, LoadsIC); 5069 } 5070 5071 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5072 return SmallIC; 5073 } 5074 5075 // Interleave if this is a large loop (small loops are already dealt with by 5076 // this point) that could benefit from interleaving. 5077 bool HasReductions = !Legal->getReductionVars()->empty(); 5078 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5079 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5080 return IC; 5081 } 5082 5083 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5084 return 1; 5085 } 5086 5087 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5088 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5089 // This function calculates the register usage by measuring the highest number 5090 // of values that are alive at a single location. Obviously, this is a very 5091 // rough estimation. We scan the loop in a topological order in order and 5092 // assign a number to each instruction. We use RPO to ensure that defs are 5093 // met before their users. We assume that each instruction that has in-loop 5094 // users starts an interval. We record every time that an in-loop value is 5095 // used, so we have a list of the first and last occurrences of each 5096 // instruction. Next, we transpose this data structure into a multi map that 5097 // holds the list of intervals that *end* at a specific location. This multi 5098 // map allows us to perform a linear search. We scan the instructions linearly 5099 // and record each time that a new interval starts, by placing it in a set. 5100 // If we find this value in the multi-map then we remove it from the set. 5101 // The max register usage is the maximum size of the set. 5102 // We also search for instructions that are defined outside the loop, but are 5103 // used inside the loop. We need this number separately from the max-interval 5104 // usage number because when we unroll, loop-invariant values do not take 5105 // more register. 5106 LoopBlocksDFS DFS(TheLoop); 5107 DFS.perform(LI); 5108 5109 RegisterUsage RU; 5110 5111 // Each 'key' in the map opens a new interval. The values 5112 // of the map are the index of the 'last seen' usage of the 5113 // instruction that is the key. 5114 using IntervalMap = DenseMap<Instruction *, unsigned>; 5115 5116 // Maps instruction to its index. 5117 SmallVector<Instruction *, 64> IdxToInstr; 5118 // Marks the end of each interval. 5119 IntervalMap EndPoint; 5120 // Saves the list of instruction indices that are used in the loop. 5121 SmallPtrSet<Instruction *, 8> Ends; 5122 // Saves the list of values that are used in the loop but are 5123 // defined outside the loop, such as arguments and constants. 5124 SmallPtrSet<Value *, 8> LoopInvariants; 5125 5126 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5127 for (Instruction &I : BB->instructionsWithoutDebug()) { 5128 IdxToInstr.push_back(&I); 5129 5130 // Save the end location of each USE. 5131 for (Value *U : I.operands()) { 5132 auto *Instr = dyn_cast<Instruction>(U); 5133 5134 // Ignore non-instruction values such as arguments, constants, etc. 5135 if (!Instr) 5136 continue; 5137 5138 // If this instruction is outside the loop then record it and continue. 5139 if (!TheLoop->contains(Instr)) { 5140 LoopInvariants.insert(Instr); 5141 continue; 5142 } 5143 5144 // Overwrite previous end points. 5145 EndPoint[Instr] = IdxToInstr.size(); 5146 Ends.insert(Instr); 5147 } 5148 } 5149 } 5150 5151 // Saves the list of intervals that end with the index in 'key'. 5152 using InstrList = SmallVector<Instruction *, 2>; 5153 DenseMap<unsigned, InstrList> TransposeEnds; 5154 5155 // Transpose the EndPoints to a list of values that end at each index. 5156 for (auto &Interval : EndPoint) 5157 TransposeEnds[Interval.second].push_back(Interval.first); 5158 5159 SmallPtrSet<Instruction *, 8> OpenIntervals; 5160 5161 // Get the size of the widest register. 5162 unsigned MaxSafeDepDist = -1U; 5163 if (Legal->getMaxSafeDepDistBytes() != -1U) 5164 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5165 unsigned WidestRegister = 5166 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5167 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5168 5169 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5170 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5171 5172 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5173 5174 // A lambda that gets the register usage for the given type and VF. 5175 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5176 if (Ty->isTokenTy()) 5177 return 0U; 5178 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5179 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5180 }; 5181 5182 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5183 Instruction *I = IdxToInstr[i]; 5184 5185 // Remove all of the instructions that end at this location. 5186 InstrList &List = TransposeEnds[i]; 5187 for (Instruction *ToRemove : List) 5188 OpenIntervals.erase(ToRemove); 5189 5190 // Ignore instructions that are never used within the loop. 5191 if (Ends.find(I) == Ends.end()) 5192 continue; 5193 5194 // Skip ignored values. 5195 if (ValuesToIgnore.find(I) != ValuesToIgnore.end()) 5196 continue; 5197 5198 // For each VF find the maximum usage of registers. 5199 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5200 if (VFs[j] == 1) { 5201 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5202 continue; 5203 } 5204 collectUniformsAndScalars(VFs[j]); 5205 // Count the number of live intervals. 5206 unsigned RegUsage = 0; 5207 for (auto Inst : OpenIntervals) { 5208 // Skip ignored values for VF > 1. 5209 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() || 5210 isScalarAfterVectorization(Inst, VFs[j])) 5211 continue; 5212 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5213 } 5214 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5215 } 5216 5217 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5218 << OpenIntervals.size() << '\n'); 5219 5220 // Add the current instruction to the list of open intervals. 5221 OpenIntervals.insert(I); 5222 } 5223 5224 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5225 unsigned Invariant = 0; 5226 if (VFs[i] == 1) 5227 Invariant = LoopInvariants.size(); 5228 else { 5229 for (auto Inst : LoopInvariants) 5230 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5231 } 5232 5233 LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5234 LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5235 LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant 5236 << '\n'); 5237 5238 RU.LoopInvariantRegs = Invariant; 5239 RU.MaxLocalUsers = MaxUsages[i]; 5240 RUs[i] = RU; 5241 } 5242 5243 return RUs; 5244 } 5245 5246 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5247 // TODO: Cost model for emulated masked load/store is completely 5248 // broken. This hack guides the cost model to use an artificially 5249 // high enough value to practically disable vectorization with such 5250 // operations, except where previously deployed legality hack allowed 5251 // using very low cost values. This is to avoid regressions coming simply 5252 // from moving "masked load/store" check from legality to cost model. 5253 // Masked Load/Gather emulation was previously never allowed. 5254 // Limited number of Masked Store/Scatter emulation was allowed. 5255 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5256 return isa<LoadInst>(I) || 5257 (isa<StoreInst>(I) && 5258 NumPredStores > NumberOfStoresToPredicate); 5259 } 5260 5261 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5262 // If we aren't vectorizing the loop, or if we've already collected the 5263 // instructions to scalarize, there's nothing to do. Collection may already 5264 // have occurred if we have a user-selected VF and are now computing the 5265 // expected cost for interleaving. 5266 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5267 return; 5268 5269 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 5270 // not profitable to scalarize any instructions, the presence of VF in the 5271 // map will indicate that we've analyzed it already. 5272 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5273 5274 // Find all the instructions that are scalar with predication in the loop and 5275 // determine if it would be better to not if-convert the blocks they are in. 5276 // If so, we also record the instructions to scalarize. 5277 for (BasicBlock *BB : TheLoop->blocks()) { 5278 if (!blockNeedsPredication(BB)) 5279 continue; 5280 for (Instruction &I : *BB) 5281 if (isScalarWithPredication(&I)) { 5282 ScalarCostsTy ScalarCosts; 5283 // Do not apply discount logic if hacked cost is needed 5284 // for emulated masked memrefs. 5285 if (!useEmulatedMaskMemRefHack(&I) && 5286 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5287 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5288 // Remember that BB will remain after vectorization. 5289 PredicatedBBsAfterVectorization.insert(BB); 5290 } 5291 } 5292 } 5293 5294 int LoopVectorizationCostModel::computePredInstDiscount( 5295 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5296 unsigned VF) { 5297 assert(!isUniformAfterVectorization(PredInst, VF) && 5298 "Instruction marked uniform-after-vectorization will be predicated"); 5299 5300 // Initialize the discount to zero, meaning that the scalar version and the 5301 // vector version cost the same. 5302 int Discount = 0; 5303 5304 // Holds instructions to analyze. The instructions we visit are mapped in 5305 // ScalarCosts. Those instructions are the ones that would be scalarized if 5306 // we find that the scalar version costs less. 5307 SmallVector<Instruction *, 8> Worklist; 5308 5309 // Returns true if the given instruction can be scalarized. 5310 auto canBeScalarized = [&](Instruction *I) -> bool { 5311 // We only attempt to scalarize instructions forming a single-use chain 5312 // from the original predicated block that would otherwise be vectorized. 5313 // Although not strictly necessary, we give up on instructions we know will 5314 // already be scalar to avoid traversing chains that are unlikely to be 5315 // beneficial. 5316 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5317 isScalarAfterVectorization(I, VF)) 5318 return false; 5319 5320 // If the instruction is scalar with predication, it will be analyzed 5321 // separately. We ignore it within the context of PredInst. 5322 if (isScalarWithPredication(I)) 5323 return false; 5324 5325 // If any of the instruction's operands are uniform after vectorization, 5326 // the instruction cannot be scalarized. This prevents, for example, a 5327 // masked load from being scalarized. 5328 // 5329 // We assume we will only emit a value for lane zero of an instruction 5330 // marked uniform after vectorization, rather than VF identical values. 5331 // Thus, if we scalarize an instruction that uses a uniform, we would 5332 // create uses of values corresponding to the lanes we aren't emitting code 5333 // for. This behavior can be changed by allowing getScalarValue to clone 5334 // the lane zero values for uniforms rather than asserting. 5335 for (Use &U : I->operands()) 5336 if (auto *J = dyn_cast<Instruction>(U.get())) 5337 if (isUniformAfterVectorization(J, VF)) 5338 return false; 5339 5340 // Otherwise, we can scalarize the instruction. 5341 return true; 5342 }; 5343 5344 // Returns true if an operand that cannot be scalarized must be extracted 5345 // from a vector. We will account for this scalarization overhead below. Note 5346 // that the non-void predicated instructions are placed in their own blocks, 5347 // and their return values are inserted into vectors. Thus, an extract would 5348 // still be required. 5349 auto needsExtract = [&](Instruction *I) -> bool { 5350 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 5351 }; 5352 5353 // Compute the expected cost discount from scalarizing the entire expression 5354 // feeding the predicated instruction. We currently only consider expressions 5355 // that are single-use instruction chains. 5356 Worklist.push_back(PredInst); 5357 while (!Worklist.empty()) { 5358 Instruction *I = Worklist.pop_back_val(); 5359 5360 // If we've already analyzed the instruction, there's nothing to do. 5361 if (ScalarCosts.find(I) != ScalarCosts.end()) 5362 continue; 5363 5364 // Compute the cost of the vector instruction. Note that this cost already 5365 // includes the scalarization overhead of the predicated instruction. 5366 unsigned VectorCost = getInstructionCost(I, VF).first; 5367 5368 // Compute the cost of the scalarized instruction. This cost is the cost of 5369 // the instruction as if it wasn't if-converted and instead remained in the 5370 // predicated block. We will scale this cost by block probability after 5371 // computing the scalarization overhead. 5372 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5373 5374 // Compute the scalarization overhead of needed insertelement instructions 5375 // and phi nodes. 5376 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5377 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5378 true, false); 5379 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5380 } 5381 5382 // Compute the scalarization overhead of needed extractelement 5383 // instructions. For each of the instruction's operands, if the operand can 5384 // be scalarized, add it to the worklist; otherwise, account for the 5385 // overhead. 5386 for (Use &U : I->operands()) 5387 if (auto *J = dyn_cast<Instruction>(U.get())) { 5388 assert(VectorType::isValidElementType(J->getType()) && 5389 "Instruction has non-scalar type"); 5390 if (canBeScalarized(J)) 5391 Worklist.push_back(J); 5392 else if (needsExtract(J)) 5393 ScalarCost += TTI.getScalarizationOverhead( 5394 ToVectorTy(J->getType(),VF), false, true); 5395 } 5396 5397 // Scale the total scalar cost by block probability. 5398 ScalarCost /= getReciprocalPredBlockProb(); 5399 5400 // Compute the discount. A non-negative discount means the vector version 5401 // of the instruction costs more, and scalarizing would be beneficial. 5402 Discount += VectorCost - ScalarCost; 5403 ScalarCosts[I] = ScalarCost; 5404 } 5405 5406 return Discount; 5407 } 5408 5409 LoopVectorizationCostModel::VectorizationCostTy 5410 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5411 VectorizationCostTy Cost; 5412 5413 // For each block. 5414 for (BasicBlock *BB : TheLoop->blocks()) { 5415 VectorizationCostTy BlockCost; 5416 5417 // For each instruction in the old loop. 5418 for (Instruction &I : BB->instructionsWithoutDebug()) { 5419 // Skip ignored values. 5420 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5421 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5422 continue; 5423 5424 VectorizationCostTy C = getInstructionCost(&I, VF); 5425 5426 // Check if we should override the cost. 5427 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5428 C.first = ForceTargetInstructionCost; 5429 5430 BlockCost.first += C.first; 5431 BlockCost.second |= C.second; 5432 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5433 << " for VF " << VF << " For instruction: " << I 5434 << '\n'); 5435 } 5436 5437 // If we are vectorizing a predicated block, it will have been 5438 // if-converted. This means that the block's instructions (aside from 5439 // stores and instructions that may divide by zero) will now be 5440 // unconditionally executed. For the scalar case, we may not always execute 5441 // the predicated block. Thus, scale the block's cost by the probability of 5442 // executing it. 5443 if (VF == 1 && blockNeedsPredication(BB)) 5444 BlockCost.first /= getReciprocalPredBlockProb(); 5445 5446 Cost.first += BlockCost.first; 5447 Cost.second |= BlockCost.second; 5448 } 5449 5450 return Cost; 5451 } 5452 5453 /// Gets Address Access SCEV after verifying that the access pattern 5454 /// is loop invariant except the induction variable dependence. 5455 /// 5456 /// This SCEV can be sent to the Target in order to estimate the address 5457 /// calculation cost. 5458 static const SCEV *getAddressAccessSCEV( 5459 Value *Ptr, 5460 LoopVectorizationLegality *Legal, 5461 PredicatedScalarEvolution &PSE, 5462 const Loop *TheLoop) { 5463 5464 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5465 if (!Gep) 5466 return nullptr; 5467 5468 // We are looking for a gep with all loop invariant indices except for one 5469 // which should be an induction variable. 5470 auto SE = PSE.getSE(); 5471 unsigned NumOperands = Gep->getNumOperands(); 5472 for (unsigned i = 1; i < NumOperands; ++i) { 5473 Value *Opd = Gep->getOperand(i); 5474 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5475 !Legal->isInductionVariable(Opd)) 5476 return nullptr; 5477 } 5478 5479 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5480 return PSE.getSCEV(Ptr); 5481 } 5482 5483 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5484 return Legal->hasStride(I->getOperand(0)) || 5485 Legal->hasStride(I->getOperand(1)); 5486 } 5487 5488 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5489 unsigned VF) { 5490 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5491 Type *ValTy = getMemInstValueType(I); 5492 auto SE = PSE.getSE(); 5493 5494 unsigned Alignment = getLoadStoreAlignment(I); 5495 unsigned AS = getLoadStoreAddressSpace(I); 5496 Value *Ptr = getLoadStorePointerOperand(I); 5497 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5498 5499 // Figure out whether the access is strided and get the stride value 5500 // if it's known in compile time 5501 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5502 5503 // Get the cost of the scalar memory instruction and address computation. 5504 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5505 5506 // Don't pass *I here, since it is scalar but will actually be part of a 5507 // vectorized loop where the user of it is a vectorized instruction. 5508 Cost += VF * 5509 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 5510 AS); 5511 5512 // Get the overhead of the extractelement and insertelement instructions 5513 // we might create due to scalarization. 5514 Cost += getScalarizationOverhead(I, VF, TTI); 5515 5516 // If we have a predicated store, it may not be executed for each vector 5517 // lane. Scale the cost by the probability of executing the predicated 5518 // block. 5519 if (isPredicatedInst(I)) { 5520 Cost /= getReciprocalPredBlockProb(); 5521 5522 if (useEmulatedMaskMemRefHack(I)) 5523 // Artificially setting to a high enough value to practically disable 5524 // vectorization with such operations. 5525 Cost = 3000000; 5526 } 5527 5528 return Cost; 5529 } 5530 5531 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5532 unsigned VF) { 5533 Type *ValTy = getMemInstValueType(I); 5534 Type *VectorTy = ToVectorTy(ValTy, VF); 5535 unsigned Alignment = getLoadStoreAlignment(I); 5536 Value *Ptr = getLoadStorePointerOperand(I); 5537 unsigned AS = getLoadStoreAddressSpace(I); 5538 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5539 5540 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5541 "Stride should be 1 or -1 for consecutive memory access"); 5542 unsigned Cost = 0; 5543 if (Legal->isMaskRequired(I)) 5544 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 5545 else 5546 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5547 5548 bool Reverse = ConsecutiveStride < 0; 5549 if (Reverse) 5550 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5551 return Cost; 5552 } 5553 5554 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5555 unsigned VF) { 5556 Type *ValTy = getMemInstValueType(I); 5557 Type *VectorTy = ToVectorTy(ValTy, VF); 5558 unsigned Alignment = getLoadStoreAlignment(I); 5559 unsigned AS = getLoadStoreAddressSpace(I); 5560 if (isa<LoadInst>(I)) { 5561 return TTI.getAddressComputationCost(ValTy) + 5562 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5563 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5564 } 5565 StoreInst *SI = cast<StoreInst>(I); 5566 5567 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5568 return TTI.getAddressComputationCost(ValTy) + 5569 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) + 5570 (isLoopInvariantStoreValue ? 0 : TTI.getVectorInstrCost( 5571 Instruction::ExtractElement, 5572 VectorTy, VF - 1)); 5573 } 5574 5575 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5576 unsigned VF) { 5577 Type *ValTy = getMemInstValueType(I); 5578 Type *VectorTy = ToVectorTy(ValTy, VF); 5579 unsigned Alignment = getLoadStoreAlignment(I); 5580 Value *Ptr = getLoadStorePointerOperand(I); 5581 5582 return TTI.getAddressComputationCost(VectorTy) + 5583 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5584 Legal->isMaskRequired(I), Alignment); 5585 } 5586 5587 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5588 unsigned VF) { 5589 Type *ValTy = getMemInstValueType(I); 5590 Type *VectorTy = ToVectorTy(ValTy, VF); 5591 unsigned AS = getLoadStoreAddressSpace(I); 5592 5593 auto Group = getInterleavedAccessGroup(I); 5594 assert(Group && "Fail to get an interleaved access group."); 5595 5596 unsigned InterleaveFactor = Group->getFactor(); 5597 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5598 5599 // Holds the indices of existing members in an interleaved load group. 5600 // An interleaved store group doesn't need this as it doesn't allow gaps. 5601 SmallVector<unsigned, 4> Indices; 5602 if (isa<LoadInst>(I)) { 5603 for (unsigned i = 0; i < InterleaveFactor; i++) 5604 if (Group->getMember(i)) 5605 Indices.push_back(i); 5606 } 5607 5608 // Calculate the cost of the whole interleaved group. 5609 bool UseMaskForGaps = 5610 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 5611 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5612 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5613 Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps); 5614 5615 if (Group->isReverse()) { 5616 // TODO: Add support for reversed masked interleaved access. 5617 assert(!Legal->isMaskRequired(I) && 5618 "Reverse masked interleaved access not supported."); 5619 Cost += Group->getNumMembers() * 5620 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5621 } 5622 return Cost; 5623 } 5624 5625 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5626 unsigned VF) { 5627 // Calculate scalar cost only. Vectorization cost should be ready at this 5628 // moment. 5629 if (VF == 1) { 5630 Type *ValTy = getMemInstValueType(I); 5631 unsigned Alignment = getLoadStoreAlignment(I); 5632 unsigned AS = getLoadStoreAddressSpace(I); 5633 5634 return TTI.getAddressComputationCost(ValTy) + 5635 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5636 } 5637 return getWideningCost(I, VF); 5638 } 5639 5640 LoopVectorizationCostModel::VectorizationCostTy 5641 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5642 // If we know that this instruction will remain uniform, check the cost of 5643 // the scalar version. 5644 if (isUniformAfterVectorization(I, VF)) 5645 VF = 1; 5646 5647 if (VF > 1 && isProfitableToScalarize(I, VF)) 5648 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5649 5650 // Forced scalars do not have any scalarization overhead. 5651 auto ForcedScalar = ForcedScalars.find(VF); 5652 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5653 auto InstSet = ForcedScalar->second; 5654 if (InstSet.find(I) != InstSet.end()) 5655 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5656 } 5657 5658 Type *VectorTy; 5659 unsigned C = getInstructionCost(I, VF, VectorTy); 5660 5661 bool TypeNotScalarized = 5662 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5663 return VectorizationCostTy(C, TypeNotScalarized); 5664 } 5665 5666 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 5667 if (VF == 1) 5668 return; 5669 NumPredStores = 0; 5670 for (BasicBlock *BB : TheLoop->blocks()) { 5671 // For each instruction in the old loop. 5672 for (Instruction &I : *BB) { 5673 Value *Ptr = getLoadStorePointerOperand(&I); 5674 if (!Ptr) 5675 continue; 5676 5677 // TODO: We should generate better code and update the cost model for 5678 // predicated uniform stores. Today they are treated as any other 5679 // predicated store (see added test cases in 5680 // invariant-store-vectorization.ll). 5681 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 5682 NumPredStores++; 5683 5684 if (Legal->isUniform(Ptr) && 5685 // Conditional loads and stores should be scalarized and predicated. 5686 // isScalarWithPredication cannot be used here since masked 5687 // gather/scatters are not considered scalar with predication. 5688 !Legal->blockNeedsPredication(I.getParent())) { 5689 // TODO: Avoid replicating loads and stores instead of 5690 // relying on instcombine to remove them. 5691 // Load: Scalar load + broadcast 5692 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 5693 unsigned Cost = getUniformMemOpCost(&I, VF); 5694 setWideningDecision(&I, VF, CM_Scalarize, Cost); 5695 continue; 5696 } 5697 5698 // We assume that widening is the best solution when possible. 5699 if (memoryInstructionCanBeWidened(&I, VF)) { 5700 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 5701 int ConsecutiveStride = 5702 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 5703 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5704 "Expected consecutive stride."); 5705 InstWidening Decision = 5706 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 5707 setWideningDecision(&I, VF, Decision, Cost); 5708 continue; 5709 } 5710 5711 // Choose between Interleaving, Gather/Scatter or Scalarization. 5712 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 5713 unsigned NumAccesses = 1; 5714 if (isAccessInterleaved(&I)) { 5715 auto Group = getInterleavedAccessGroup(&I); 5716 assert(Group && "Fail to get an interleaved access group."); 5717 5718 // Make one decision for the whole group. 5719 if (getWideningDecision(&I, VF) != CM_Unknown) 5720 continue; 5721 5722 NumAccesses = Group->getNumMembers(); 5723 if (interleavedAccessCanBeWidened(&I, VF)) 5724 InterleaveCost = getInterleaveGroupCost(&I, VF); 5725 } 5726 5727 unsigned GatherScatterCost = 5728 isLegalGatherOrScatter(&I) 5729 ? getGatherScatterCost(&I, VF) * NumAccesses 5730 : std::numeric_limits<unsigned>::max(); 5731 5732 unsigned ScalarizationCost = 5733 getMemInstScalarizationCost(&I, VF) * NumAccesses; 5734 5735 // Choose better solution for the current VF, 5736 // write down this decision and use it during vectorization. 5737 unsigned Cost; 5738 InstWidening Decision; 5739 if (InterleaveCost <= GatherScatterCost && 5740 InterleaveCost < ScalarizationCost) { 5741 Decision = CM_Interleave; 5742 Cost = InterleaveCost; 5743 } else if (GatherScatterCost < ScalarizationCost) { 5744 Decision = CM_GatherScatter; 5745 Cost = GatherScatterCost; 5746 } else { 5747 Decision = CM_Scalarize; 5748 Cost = ScalarizationCost; 5749 } 5750 // If the instructions belongs to an interleave group, the whole group 5751 // receives the same decision. The whole group receives the cost, but 5752 // the cost will actually be assigned to one instruction. 5753 if (auto Group = getInterleavedAccessGroup(&I)) 5754 setWideningDecision(Group, VF, Decision, Cost); 5755 else 5756 setWideningDecision(&I, VF, Decision, Cost); 5757 } 5758 } 5759 5760 // Make sure that any load of address and any other address computation 5761 // remains scalar unless there is gather/scatter support. This avoids 5762 // inevitable extracts into address registers, and also has the benefit of 5763 // activating LSR more, since that pass can't optimize vectorized 5764 // addresses. 5765 if (TTI.prefersVectorizedAddressing()) 5766 return; 5767 5768 // Start with all scalar pointer uses. 5769 SmallPtrSet<Instruction *, 8> AddrDefs; 5770 for (BasicBlock *BB : TheLoop->blocks()) 5771 for (Instruction &I : *BB) { 5772 Instruction *PtrDef = 5773 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 5774 if (PtrDef && TheLoop->contains(PtrDef) && 5775 getWideningDecision(&I, VF) != CM_GatherScatter) 5776 AddrDefs.insert(PtrDef); 5777 } 5778 5779 // Add all instructions used to generate the addresses. 5780 SmallVector<Instruction *, 4> Worklist; 5781 for (auto *I : AddrDefs) 5782 Worklist.push_back(I); 5783 while (!Worklist.empty()) { 5784 Instruction *I = Worklist.pop_back_val(); 5785 for (auto &Op : I->operands()) 5786 if (auto *InstOp = dyn_cast<Instruction>(Op)) 5787 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 5788 AddrDefs.insert(InstOp).second) 5789 Worklist.push_back(InstOp); 5790 } 5791 5792 for (auto *I : AddrDefs) { 5793 if (isa<LoadInst>(I)) { 5794 // Setting the desired widening decision should ideally be handled in 5795 // by cost functions, but since this involves the task of finding out 5796 // if the loaded register is involved in an address computation, it is 5797 // instead changed here when we know this is the case. 5798 InstWidening Decision = getWideningDecision(I, VF); 5799 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 5800 // Scalarize a widened load of address. 5801 setWideningDecision(I, VF, CM_Scalarize, 5802 (VF * getMemoryInstructionCost(I, 1))); 5803 else if (auto Group = getInterleavedAccessGroup(I)) { 5804 // Scalarize an interleave group of address loads. 5805 for (unsigned I = 0; I < Group->getFactor(); ++I) { 5806 if (Instruction *Member = Group->getMember(I)) 5807 setWideningDecision(Member, VF, CM_Scalarize, 5808 (VF * getMemoryInstructionCost(Member, 1))); 5809 } 5810 } 5811 } else 5812 // Make sure I gets scalarized and a cost estimate without 5813 // scalarization overhead. 5814 ForcedScalars[VF].insert(I); 5815 } 5816 } 5817 5818 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5819 unsigned VF, 5820 Type *&VectorTy) { 5821 Type *RetTy = I->getType(); 5822 if (canTruncateToMinimalBitwidth(I, VF)) 5823 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5824 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 5825 auto SE = PSE.getSE(); 5826 5827 // TODO: We need to estimate the cost of intrinsic calls. 5828 switch (I->getOpcode()) { 5829 case Instruction::GetElementPtr: 5830 // We mark this instruction as zero-cost because the cost of GEPs in 5831 // vectorized code depends on whether the corresponding memory instruction 5832 // is scalarized or not. Therefore, we handle GEPs with the memory 5833 // instruction cost. 5834 return 0; 5835 case Instruction::Br: { 5836 // In cases of scalarized and predicated instructions, there will be VF 5837 // predicated blocks in the vectorized loop. Each branch around these 5838 // blocks requires also an extract of its vector compare i1 element. 5839 bool ScalarPredicatedBB = false; 5840 BranchInst *BI = cast<BranchInst>(I); 5841 if (VF > 1 && BI->isConditional() && 5842 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 5843 PredicatedBBsAfterVectorization.end() || 5844 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 5845 PredicatedBBsAfterVectorization.end())) 5846 ScalarPredicatedBB = true; 5847 5848 if (ScalarPredicatedBB) { 5849 // Return cost for branches around scalarized and predicated blocks. 5850 Type *Vec_i1Ty = 5851 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 5852 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 5853 (TTI.getCFInstrCost(Instruction::Br) * VF)); 5854 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 5855 // The back-edge branch will remain, as will all scalar branches. 5856 return TTI.getCFInstrCost(Instruction::Br); 5857 else 5858 // This branch will be eliminated by if-conversion. 5859 return 0; 5860 // Note: We currently assume zero cost for an unconditional branch inside 5861 // a predicated block since it will become a fall-through, although we 5862 // may decide in the future to call TTI for all branches. 5863 } 5864 case Instruction::PHI: { 5865 auto *Phi = cast<PHINode>(I); 5866 5867 // First-order recurrences are replaced by vector shuffles inside the loop. 5868 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 5869 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5870 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5871 VectorTy, VF - 1, VectorType::get(RetTy, 1)); 5872 5873 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 5874 // converted into select instructions. We require N - 1 selects per phi 5875 // node, where N is the number of incoming values. 5876 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 5877 return (Phi->getNumIncomingValues() - 1) * 5878 TTI.getCmpSelInstrCost( 5879 Instruction::Select, ToVectorTy(Phi->getType(), VF), 5880 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 5881 5882 return TTI.getCFInstrCost(Instruction::PHI); 5883 } 5884 case Instruction::UDiv: 5885 case Instruction::SDiv: 5886 case Instruction::URem: 5887 case Instruction::SRem: 5888 // If we have a predicated instruction, it may not be executed for each 5889 // vector lane. Get the scalarization cost and scale this amount by the 5890 // probability of executing the predicated block. If the instruction is not 5891 // predicated, we fall through to the next case. 5892 if (VF > 1 && isScalarWithPredication(I)) { 5893 unsigned Cost = 0; 5894 5895 // These instructions have a non-void type, so account for the phi nodes 5896 // that we will create. This cost is likely to be zero. The phi node 5897 // cost, if any, should be scaled by the block probability because it 5898 // models a copy at the end of each predicated block. 5899 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 5900 5901 // The cost of the non-predicated instruction. 5902 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 5903 5904 // The cost of insertelement and extractelement instructions needed for 5905 // scalarization. 5906 Cost += getScalarizationOverhead(I, VF, TTI); 5907 5908 // Scale the cost by the probability of executing the predicated blocks. 5909 // This assumes the predicated block for each vector lane is equally 5910 // likely. 5911 return Cost / getReciprocalPredBlockProb(); 5912 } 5913 LLVM_FALLTHROUGH; 5914 case Instruction::Add: 5915 case Instruction::FAdd: 5916 case Instruction::Sub: 5917 case Instruction::FSub: 5918 case Instruction::Mul: 5919 case Instruction::FMul: 5920 case Instruction::FDiv: 5921 case Instruction::FRem: 5922 case Instruction::Shl: 5923 case Instruction::LShr: 5924 case Instruction::AShr: 5925 case Instruction::And: 5926 case Instruction::Or: 5927 case Instruction::Xor: { 5928 // Since we will replace the stride by 1 the multiplication should go away. 5929 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 5930 return 0; 5931 // Certain instructions can be cheaper to vectorize if they have a constant 5932 // second vector operand. One example of this are shifts on x86. 5933 Value *Op2 = I->getOperand(1); 5934 TargetTransformInfo::OperandValueProperties Op2VP; 5935 TargetTransformInfo::OperandValueKind Op2VK = 5936 TTI.getOperandInfo(Op2, Op2VP); 5937 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 5938 Op2VK = TargetTransformInfo::OK_UniformValue; 5939 5940 SmallVector<const Value *, 4> Operands(I->operand_values()); 5941 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 5942 return N * TTI.getArithmeticInstrCost( 5943 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 5944 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands); 5945 } 5946 case Instruction::Select: { 5947 SelectInst *SI = cast<SelectInst>(I); 5948 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 5949 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 5950 Type *CondTy = SI->getCondition()->getType(); 5951 if (!ScalarCond) 5952 CondTy = VectorType::get(CondTy, VF); 5953 5954 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 5955 } 5956 case Instruction::ICmp: 5957 case Instruction::FCmp: { 5958 Type *ValTy = I->getOperand(0)->getType(); 5959 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 5960 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 5961 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 5962 VectorTy = ToVectorTy(ValTy, VF); 5963 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 5964 } 5965 case Instruction::Store: 5966 case Instruction::Load: { 5967 unsigned Width = VF; 5968 if (Width > 1) { 5969 InstWidening Decision = getWideningDecision(I, Width); 5970 assert(Decision != CM_Unknown && 5971 "CM decision should be taken at this point"); 5972 if (Decision == CM_Scalarize) 5973 Width = 1; 5974 } 5975 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 5976 return getMemoryInstructionCost(I, VF); 5977 } 5978 case Instruction::ZExt: 5979 case Instruction::SExt: 5980 case Instruction::FPToUI: 5981 case Instruction::FPToSI: 5982 case Instruction::FPExt: 5983 case Instruction::PtrToInt: 5984 case Instruction::IntToPtr: 5985 case Instruction::SIToFP: 5986 case Instruction::UIToFP: 5987 case Instruction::Trunc: 5988 case Instruction::FPTrunc: 5989 case Instruction::BitCast: { 5990 // We optimize the truncation of induction variables having constant 5991 // integer steps. The cost of these truncations is the same as the scalar 5992 // operation. 5993 if (isOptimizableIVTruncate(I, VF)) { 5994 auto *Trunc = cast<TruncInst>(I); 5995 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 5996 Trunc->getSrcTy(), Trunc); 5997 } 5998 5999 Type *SrcScalarTy = I->getOperand(0)->getType(); 6000 Type *SrcVecTy = 6001 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6002 if (canTruncateToMinimalBitwidth(I, VF)) { 6003 // This cast is going to be shrunk. This may remove the cast or it might 6004 // turn it into slightly different cast. For example, if MinBW == 16, 6005 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6006 // 6007 // Calculate the modified src and dest types. 6008 Type *MinVecTy = VectorTy; 6009 if (I->getOpcode() == Instruction::Trunc) { 6010 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6011 VectorTy = 6012 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6013 } else if (I->getOpcode() == Instruction::ZExt || 6014 I->getOpcode() == Instruction::SExt) { 6015 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6016 VectorTy = 6017 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6018 } 6019 } 6020 6021 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6022 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 6023 } 6024 case Instruction::Call: { 6025 bool NeedToScalarize; 6026 CallInst *CI = cast<CallInst>(I); 6027 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6028 if (getVectorIntrinsicIDForCall(CI, TLI)) 6029 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6030 return CallCost; 6031 } 6032 default: 6033 // The cost of executing VF copies of the scalar instruction. This opcode 6034 // is unknown. Assume that it is the same as 'mul'. 6035 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6036 getScalarizationOverhead(I, VF, TTI); 6037 } // end of switch. 6038 } 6039 6040 char LoopVectorize::ID = 0; 6041 6042 static const char lv_name[] = "Loop Vectorization"; 6043 6044 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6045 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6046 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6047 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6048 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6049 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6050 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6051 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6052 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6053 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6054 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6055 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6056 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6057 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6058 6059 namespace llvm { 6060 6061 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6062 bool VectorizeOnlyWhenForced) { 6063 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6064 } 6065 6066 } // end namespace llvm 6067 6068 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6069 // Check if the pointer operand of a load or store instruction is 6070 // consecutive. 6071 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6072 return Legal->isConsecutivePtr(Ptr); 6073 return false; 6074 } 6075 6076 void LoopVectorizationCostModel::collectValuesToIgnore() { 6077 // Ignore ephemeral values. 6078 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6079 6080 // Ignore type-promoting instructions we identified during reduction 6081 // detection. 6082 for (auto &Reduction : *Legal->getReductionVars()) { 6083 RecurrenceDescriptor &RedDes = Reduction.second; 6084 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6085 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6086 } 6087 // Ignore type-casting instructions we identified during induction 6088 // detection. 6089 for (auto &Induction : *Legal->getInductionVars()) { 6090 InductionDescriptor &IndDes = Induction.second; 6091 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6092 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6093 } 6094 } 6095 6096 // TODO: we could return a pair of values that specify the max VF and 6097 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 6098 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 6099 // doesn't have a cost model that can choose which plan to execute if 6100 // more than one is generated. 6101 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 6102 LoopVectorizationCostModel &CM) { 6103 unsigned WidestType; 6104 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 6105 return WidestVectorRegBits / WidestType; 6106 } 6107 6108 VectorizationFactor 6109 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize, 6110 unsigned UserVF) { 6111 unsigned VF = UserVF; 6112 // Outer loop handling: They may require CFG and instruction level 6113 // transformations before even evaluating whether vectorization is profitable. 6114 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6115 // the vectorization pipeline. 6116 if (!OrigLoop->empty()) { 6117 // If the user doesn't provide a vectorization factor, determine a 6118 // reasonable one. 6119 if (!UserVF) { 6120 // We set VF to 4 for stress testing. 6121 if (VPlanBuildStressTest) 6122 VF = 4; 6123 else 6124 VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM); 6125 } 6126 6127 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6128 assert(isPowerOf2_32(VF) && "VF needs to be a power of two"); 6129 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user VF " : "computed VF ") 6130 << VF << " to build VPlans.\n"); 6131 buildVPlans(VF, VF); 6132 6133 // For VPlan build stress testing, we bail out after VPlan construction. 6134 if (VPlanBuildStressTest) 6135 return VectorizationFactor::Disabled(); 6136 6137 return {VF, 0}; 6138 } 6139 6140 LLVM_DEBUG( 6141 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6142 "VPlan-native path.\n"); 6143 return VectorizationFactor::Disabled(); 6144 } 6145 6146 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(bool OptForSize, 6147 unsigned UserVF) { 6148 assert(OrigLoop->empty() && "Inner loop expected."); 6149 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 6150 if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved. 6151 return None; 6152 6153 // Invalidate interleave groups if all blocks of loop will be predicated. 6154 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6155 !useMaskedInterleavedAccesses(*TTI)) { 6156 LLVM_DEBUG( 6157 dbgs() 6158 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6159 "which requires masked-interleaved support.\n"); 6160 CM.InterleaveInfo.reset(); 6161 } 6162 6163 if (UserVF) { 6164 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6165 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6166 // Collect the instructions (and their associated costs) that will be more 6167 // profitable to scalarize. 6168 CM.selectUserVectorizationFactor(UserVF); 6169 buildVPlansWithVPRecipes(UserVF, UserVF); 6170 LLVM_DEBUG(printPlans(dbgs())); 6171 return {{UserVF, 0}}; 6172 } 6173 6174 unsigned MaxVF = MaybeMaxVF.getValue(); 6175 assert(MaxVF != 0 && "MaxVF is zero."); 6176 6177 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6178 // Collect Uniform and Scalar instructions after vectorization with VF. 6179 CM.collectUniformsAndScalars(VF); 6180 6181 // Collect the instructions (and their associated costs) that will be more 6182 // profitable to scalarize. 6183 if (VF > 1) 6184 CM.collectInstsToScalarize(VF); 6185 } 6186 6187 buildVPlansWithVPRecipes(1, MaxVF); 6188 LLVM_DEBUG(printPlans(dbgs())); 6189 if (MaxVF == 1) 6190 return VectorizationFactor::Disabled(); 6191 6192 // Select the optimal vectorization factor. 6193 return CM.selectVectorizationFactor(MaxVF); 6194 } 6195 6196 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6197 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6198 << '\n'); 6199 BestVF = VF; 6200 BestUF = UF; 6201 6202 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6203 return !Plan->hasVF(VF); 6204 }); 6205 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6206 } 6207 6208 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6209 DominatorTree *DT) { 6210 // Perform the actual loop transformation. 6211 6212 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6213 VPCallbackILV CallbackILV(ILV); 6214 6215 VPTransformState State{BestVF, BestUF, LI, 6216 DT, ILV.Builder, ILV.VectorLoopValueMap, 6217 &ILV, CallbackILV}; 6218 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6219 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6220 6221 //===------------------------------------------------===// 6222 // 6223 // Notice: any optimization or new instruction that go 6224 // into the code below should also be implemented in 6225 // the cost-model. 6226 // 6227 //===------------------------------------------------===// 6228 6229 // 2. Copy and widen instructions from the old loop into the new loop. 6230 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6231 VPlans.front()->execute(&State); 6232 6233 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6234 // predication, updating analyses. 6235 ILV.fixVectorizedLoop(); 6236 } 6237 6238 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6239 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6240 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6241 6242 // We create new control-flow for the vectorized loop, so the original 6243 // condition will be dead after vectorization if it's only used by the 6244 // branch. 6245 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6246 if (Cmp && Cmp->hasOneUse()) 6247 DeadInstructions.insert(Cmp); 6248 6249 // We create new "steps" for induction variable updates to which the original 6250 // induction variables map. An original update instruction will be dead if 6251 // all its users except the induction variable are dead. 6252 for (auto &Induction : *Legal->getInductionVars()) { 6253 PHINode *Ind = Induction.first; 6254 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6255 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6256 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6257 DeadInstructions.end(); 6258 })) 6259 DeadInstructions.insert(IndUpdate); 6260 6261 // We record as "Dead" also the type-casting instructions we had identified 6262 // during induction analysis. We don't need any handling for them in the 6263 // vectorized loop because we have proven that, under a proper runtime 6264 // test guarding the vectorized loop, the value of the phi, and the casted 6265 // value of the phi, are the same. The last instruction in this casting chain 6266 // will get its scalar/vector/widened def from the scalar/vector/widened def 6267 // of the respective phi node. Any other casts in the induction def-use chain 6268 // have no other uses outside the phi update chain, and will be ignored. 6269 InductionDescriptor &IndDes = Induction.second; 6270 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6271 DeadInstructions.insert(Casts.begin(), Casts.end()); 6272 } 6273 } 6274 6275 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6276 6277 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6278 6279 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6280 Instruction::BinaryOps BinOp) { 6281 // When unrolling and the VF is 1, we only need to add a simple scalar. 6282 Type *Ty = Val->getType(); 6283 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6284 6285 if (Ty->isFloatingPointTy()) { 6286 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6287 6288 // Floating point operations had to be 'fast' to enable the unrolling. 6289 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6290 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6291 } 6292 Constant *C = ConstantInt::get(Ty, StartIdx); 6293 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6294 } 6295 6296 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6297 SmallVector<Metadata *, 4> MDs; 6298 // Reserve first location for self reference to the LoopID metadata node. 6299 MDs.push_back(nullptr); 6300 bool IsUnrollMetadata = false; 6301 MDNode *LoopID = L->getLoopID(); 6302 if (LoopID) { 6303 // First find existing loop unrolling disable metadata. 6304 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6305 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6306 if (MD) { 6307 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6308 IsUnrollMetadata = 6309 S && S->getString().startswith("llvm.loop.unroll.disable"); 6310 } 6311 MDs.push_back(LoopID->getOperand(i)); 6312 } 6313 } 6314 6315 if (!IsUnrollMetadata) { 6316 // Add runtime unroll disable metadata. 6317 LLVMContext &Context = L->getHeader()->getContext(); 6318 SmallVector<Metadata *, 1> DisableOperands; 6319 DisableOperands.push_back( 6320 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6321 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6322 MDs.push_back(DisableNode); 6323 MDNode *NewLoopID = MDNode::get(Context, MDs); 6324 // Set operand 0 to refer to the loop id itself. 6325 NewLoopID->replaceOperandWith(0, NewLoopID); 6326 L->setLoopID(NewLoopID); 6327 } 6328 } 6329 6330 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6331 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6332 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6333 bool PredicateAtRangeStart = Predicate(Range.Start); 6334 6335 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6336 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6337 Range.End = TmpVF; 6338 break; 6339 } 6340 6341 return PredicateAtRangeStart; 6342 } 6343 6344 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6345 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6346 /// of VF's starting at a given VF and extending it as much as possible. Each 6347 /// vectorization decision can potentially shorten this sub-range during 6348 /// buildVPlan(). 6349 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6350 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6351 VFRange SubRange = {VF, MaxVF + 1}; 6352 VPlans.push_back(buildVPlan(SubRange)); 6353 VF = SubRange.End; 6354 } 6355 } 6356 6357 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6358 VPlanPtr &Plan) { 6359 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6360 6361 // Look for cached value. 6362 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6363 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6364 if (ECEntryIt != EdgeMaskCache.end()) 6365 return ECEntryIt->second; 6366 6367 VPValue *SrcMask = createBlockInMask(Src, Plan); 6368 6369 // The terminator has to be a branch inst! 6370 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6371 assert(BI && "Unexpected terminator found"); 6372 6373 if (!BI->isConditional()) 6374 return EdgeMaskCache[Edge] = SrcMask; 6375 6376 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6377 assert(EdgeMask && "No Edge Mask found for condition"); 6378 6379 if (BI->getSuccessor(0) != Dst) 6380 EdgeMask = Builder.createNot(EdgeMask); 6381 6382 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6383 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6384 6385 return EdgeMaskCache[Edge] = EdgeMask; 6386 } 6387 6388 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6389 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6390 6391 // Look for cached value. 6392 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6393 if (BCEntryIt != BlockMaskCache.end()) 6394 return BCEntryIt->second; 6395 6396 // All-one mask is modelled as no-mask following the convention for masked 6397 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6398 VPValue *BlockMask = nullptr; 6399 6400 if (OrigLoop->getHeader() == BB) { 6401 if (!CM.blockNeedsPredication(BB)) 6402 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6403 6404 // Introduce the early-exit compare IV <= BTC to form header block mask. 6405 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6406 VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6407 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6408 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6409 return BlockMaskCache[BB] = BlockMask; 6410 } 6411 6412 // This is the block mask. We OR all incoming edges. 6413 for (auto *Predecessor : predecessors(BB)) { 6414 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6415 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6416 return BlockMaskCache[BB] = EdgeMask; 6417 6418 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6419 BlockMask = EdgeMask; 6420 continue; 6421 } 6422 6423 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6424 } 6425 6426 return BlockMaskCache[BB] = BlockMask; 6427 } 6428 6429 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I, 6430 VFRange &Range, 6431 VPlanPtr &Plan) { 6432 const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I); 6433 if (!IG) 6434 return nullptr; 6435 6436 // Now check if IG is relevant for VF's in the given range. 6437 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 6438 return [=](unsigned VF) -> bool { 6439 return (VF >= 2 && // Query is illegal for VF == 1 6440 CM.getWideningDecision(I, VF) == 6441 LoopVectorizationCostModel::CM_Interleave); 6442 }; 6443 }; 6444 if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range)) 6445 return nullptr; 6446 6447 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 6448 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 6449 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 6450 assert(I == IG->getInsertPos() && 6451 "Generating a recipe for an adjunct member of an interleave group"); 6452 6453 VPValue *Mask = nullptr; 6454 if (Legal->isMaskRequired(I)) 6455 Mask = createBlockInMask(I->getParent(), Plan); 6456 6457 return new VPInterleaveRecipe(IG, Mask); 6458 } 6459 6460 VPWidenMemoryInstructionRecipe * 6461 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6462 VPlanPtr &Plan) { 6463 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 6464 return nullptr; 6465 6466 auto willWiden = [&](unsigned VF) -> bool { 6467 if (VF == 1) 6468 return false; 6469 if (CM.isScalarAfterVectorization(I, VF) || 6470 CM.isProfitableToScalarize(I, VF)) 6471 return false; 6472 LoopVectorizationCostModel::InstWidening Decision = 6473 CM.getWideningDecision(I, VF); 6474 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6475 "CM decision should be taken at this point."); 6476 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 6477 "Interleave memory opportunity should be caught earlier."); 6478 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6479 }; 6480 6481 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6482 return nullptr; 6483 6484 VPValue *Mask = nullptr; 6485 if (Legal->isMaskRequired(I)) 6486 Mask = createBlockInMask(I->getParent(), Plan); 6487 6488 return new VPWidenMemoryInstructionRecipe(*I, Mask); 6489 } 6490 6491 VPWidenIntOrFpInductionRecipe * 6492 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) { 6493 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 6494 // Check if this is an integer or fp induction. If so, build the recipe that 6495 // produces its scalar and vector values. 6496 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 6497 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6498 II.getKind() == InductionDescriptor::IK_FpInduction) 6499 return new VPWidenIntOrFpInductionRecipe(Phi); 6500 6501 return nullptr; 6502 } 6503 6504 // Optimize the special case where the source is a constant integer 6505 // induction variable. Notice that we can only optimize the 'trunc' case 6506 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6507 // (c) other casts depend on pointer size. 6508 6509 // Determine whether \p K is a truncation based on an induction variable that 6510 // can be optimized. 6511 auto isOptimizableIVTruncate = 6512 [&](Instruction *K) -> std::function<bool(unsigned)> { 6513 return 6514 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6515 }; 6516 6517 if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange( 6518 isOptimizableIVTruncate(I), Range)) 6519 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6520 cast<TruncInst>(I)); 6521 return nullptr; 6522 } 6523 6524 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) { 6525 PHINode *Phi = dyn_cast<PHINode>(I); 6526 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 6527 return nullptr; 6528 6529 // We know that all PHIs in non-header blocks are converted into selects, so 6530 // we don't have to worry about the insertion order and we can just use the 6531 // builder. At this point we generate the predication tree. There may be 6532 // duplications since this is a simple recursive scan, but future 6533 // optimizations will clean it up. 6534 6535 SmallVector<VPValue *, 2> Masks; 6536 unsigned NumIncoming = Phi->getNumIncomingValues(); 6537 for (unsigned In = 0; In < NumIncoming; In++) { 6538 VPValue *EdgeMask = 6539 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6540 assert((EdgeMask || NumIncoming == 1) && 6541 "Multiple predecessors with one having a full mask"); 6542 if (EdgeMask) 6543 Masks.push_back(EdgeMask); 6544 } 6545 return new VPBlendRecipe(Phi, Masks); 6546 } 6547 6548 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 6549 VFRange &Range) { 6550 6551 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6552 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6553 6554 if (IsPredicated) 6555 return false; 6556 6557 auto IsVectorizableOpcode = [](unsigned Opcode) { 6558 switch (Opcode) { 6559 case Instruction::Add: 6560 case Instruction::And: 6561 case Instruction::AShr: 6562 case Instruction::BitCast: 6563 case Instruction::Br: 6564 case Instruction::Call: 6565 case Instruction::FAdd: 6566 case Instruction::FCmp: 6567 case Instruction::FDiv: 6568 case Instruction::FMul: 6569 case Instruction::FPExt: 6570 case Instruction::FPToSI: 6571 case Instruction::FPToUI: 6572 case Instruction::FPTrunc: 6573 case Instruction::FRem: 6574 case Instruction::FSub: 6575 case Instruction::GetElementPtr: 6576 case Instruction::ICmp: 6577 case Instruction::IntToPtr: 6578 case Instruction::Load: 6579 case Instruction::LShr: 6580 case Instruction::Mul: 6581 case Instruction::Or: 6582 case Instruction::PHI: 6583 case Instruction::PtrToInt: 6584 case Instruction::SDiv: 6585 case Instruction::Select: 6586 case Instruction::SExt: 6587 case Instruction::Shl: 6588 case Instruction::SIToFP: 6589 case Instruction::SRem: 6590 case Instruction::Store: 6591 case Instruction::Sub: 6592 case Instruction::Trunc: 6593 case Instruction::UDiv: 6594 case Instruction::UIToFP: 6595 case Instruction::URem: 6596 case Instruction::Xor: 6597 case Instruction::ZExt: 6598 return true; 6599 } 6600 return false; 6601 }; 6602 6603 if (!IsVectorizableOpcode(I->getOpcode())) 6604 return false; 6605 6606 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6607 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6608 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6609 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6610 return false; 6611 } 6612 6613 auto willWiden = [&](unsigned VF) -> bool { 6614 if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) || 6615 CM.isProfitableToScalarize(I, VF))) 6616 return false; 6617 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6618 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6619 // The following case may be scalarized depending on the VF. 6620 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6621 // version of the instruction. 6622 // Is it beneficial to perform intrinsic call compared to lib call? 6623 bool NeedToScalarize; 6624 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 6625 bool UseVectorIntrinsic = 6626 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 6627 return UseVectorIntrinsic || !NeedToScalarize; 6628 } 6629 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 6630 assert(CM.getWideningDecision(I, VF) == 6631 LoopVectorizationCostModel::CM_Scalarize && 6632 "Memory widening decisions should have been taken care by now"); 6633 return false; 6634 } 6635 return true; 6636 }; 6637 6638 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6639 return false; 6640 6641 // Success: widen this instruction. We optimize the common case where 6642 // consecutive instructions can be represented by a single recipe. 6643 if (!VPBB->empty()) { 6644 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 6645 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 6646 return true; 6647 } 6648 6649 VPBB->appendRecipe(new VPWidenRecipe(I)); 6650 return true; 6651 } 6652 6653 VPBasicBlock *VPRecipeBuilder::handleReplication( 6654 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 6655 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 6656 VPlanPtr &Plan) { 6657 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 6658 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 6659 Range); 6660 6661 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6662 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6663 6664 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 6665 6666 // Find if I uses a predicated instruction. If so, it will use its scalar 6667 // value. Avoid hoisting the insert-element which packs the scalar value into 6668 // a vector value, as that happens iff all users use the vector value. 6669 for (auto &Op : I->operands()) 6670 if (auto *PredInst = dyn_cast<Instruction>(Op)) 6671 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 6672 PredInst2Recipe[PredInst]->setAlsoPack(false); 6673 6674 // Finalize the recipe for Instr, first if it is not predicated. 6675 if (!IsPredicated) { 6676 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 6677 VPBB->appendRecipe(Recipe); 6678 return VPBB; 6679 } 6680 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 6681 assert(VPBB->getSuccessors().empty() && 6682 "VPBB has successors when handling predicated replication."); 6683 // Record predicated instructions for above packing optimizations. 6684 PredInst2Recipe[I] = Recipe; 6685 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 6686 VPBlockUtils::insertBlockAfter(Region, VPBB); 6687 auto *RegSucc = new VPBasicBlock(); 6688 VPBlockUtils::insertBlockAfter(RegSucc, Region); 6689 return RegSucc; 6690 } 6691 6692 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 6693 VPRecipeBase *PredRecipe, 6694 VPlanPtr &Plan) { 6695 // Instructions marked for predication are replicated and placed under an 6696 // if-then construct to prevent side-effects. 6697 6698 // Generate recipes to compute the block mask for this region. 6699 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 6700 6701 // Build the triangular if-then region. 6702 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 6703 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 6704 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 6705 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 6706 auto *PHIRecipe = 6707 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 6708 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 6709 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 6710 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 6711 6712 // Note: first set Entry as region entry and then connect successors starting 6713 // from it in order, to propagate the "parent" of each VPBasicBlock. 6714 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 6715 VPBlockUtils::connectBlocks(Pred, Exit); 6716 6717 return Region; 6718 } 6719 6720 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range, 6721 VPlanPtr &Plan, VPBasicBlock *VPBB) { 6722 VPRecipeBase *Recipe = nullptr; 6723 // Check if Instr should belong to an interleave memory recipe, or already 6724 // does. In the latter case Instr is irrelevant. 6725 if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) { 6726 VPBB->appendRecipe(Recipe); 6727 return true; 6728 } 6729 6730 // Check if Instr is a memory operation that should be widened. 6731 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 6732 VPBB->appendRecipe(Recipe); 6733 return true; 6734 } 6735 6736 // Check if Instr should form some PHI recipe. 6737 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 6738 VPBB->appendRecipe(Recipe); 6739 return true; 6740 } 6741 if ((Recipe = tryToBlend(Instr, Plan))) { 6742 VPBB->appendRecipe(Recipe); 6743 return true; 6744 } 6745 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 6746 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 6747 return true; 6748 } 6749 6750 // Check if Instr is to be widened by a general VPWidenRecipe, after 6751 // having first checked for specific widening recipes that deal with 6752 // Interleave Groups, Inductions and Phi nodes. 6753 if (tryToWiden(Instr, VPBB, Range)) 6754 return true; 6755 6756 return false; 6757 } 6758 6759 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF, 6760 unsigned MaxVF) { 6761 assert(OrigLoop->empty() && "Inner loop expected."); 6762 6763 // Collect conditions feeding internal conditional branches; they need to be 6764 // represented in VPlan for it to model masking. 6765 SmallPtrSet<Value *, 1> NeedDef; 6766 6767 auto *Latch = OrigLoop->getLoopLatch(); 6768 for (BasicBlock *BB : OrigLoop->blocks()) { 6769 if (BB == Latch) 6770 continue; 6771 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 6772 if (Branch && Branch->isConditional()) 6773 NeedDef.insert(Branch->getCondition()); 6774 } 6775 6776 // If the tail is to be folded by masking, the primary induction variable 6777 // needs to be represented in VPlan for it to model early-exit masking. 6778 if (CM.foldTailByMasking()) 6779 NeedDef.insert(Legal->getPrimaryInduction()); 6780 6781 // Collect instructions from the original loop that will become trivially dead 6782 // in the vectorized loop. We don't need to vectorize these instructions. For 6783 // example, original induction update instructions can become dead because we 6784 // separately emit induction "steps" when generating code for the new loop. 6785 // Similarly, we create a new latch condition when setting up the structure 6786 // of the new loop, so the old one can become dead. 6787 SmallPtrSet<Instruction *, 4> DeadInstructions; 6788 collectTriviallyDeadInstructions(DeadInstructions); 6789 6790 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6791 VFRange SubRange = {VF, MaxVF + 1}; 6792 VPlans.push_back( 6793 buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions)); 6794 VF = SubRange.End; 6795 } 6796 } 6797 6798 LoopVectorizationPlanner::VPlanPtr 6799 LoopVectorizationPlanner::buildVPlanWithVPRecipes( 6800 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef, 6801 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6802 // Hold a mapping from predicated instructions to their recipes, in order to 6803 // fix their AlsoPack behavior if a user is determined to replicate and use a 6804 // scalar instead of vector value. 6805 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 6806 6807 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 6808 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 6809 6810 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 6811 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 6812 auto Plan = llvm::make_unique<VPlan>(VPBB); 6813 6814 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, TTI, Legal, CM, Builder); 6815 // Represent values that will have defs inside VPlan. 6816 for (Value *V : NeedDef) 6817 Plan->addVPValue(V); 6818 6819 // Scan the body of the loop in a topological order to visit each basic block 6820 // after having visited its predecessor basic blocks. 6821 LoopBlocksDFS DFS(OrigLoop); 6822 DFS.perform(LI); 6823 6824 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6825 // Relevant instructions from basic block BB will be grouped into VPRecipe 6826 // ingredients and fill a new VPBasicBlock. 6827 unsigned VPBBsForBB = 0; 6828 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 6829 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 6830 VPBB = FirstVPBBForBB; 6831 Builder.setInsertPoint(VPBB); 6832 6833 std::vector<Instruction *> Ingredients; 6834 6835 // Organize the ingredients to vectorize from current basic block in the 6836 // right order. 6837 for (Instruction &I : BB->instructionsWithoutDebug()) { 6838 Instruction *Instr = &I; 6839 6840 // First filter out irrelevant instructions, to ensure no recipes are 6841 // built for them. 6842 if (isa<BranchInst>(Instr) || 6843 DeadInstructions.find(Instr) != DeadInstructions.end()) 6844 continue; 6845 6846 // I is a member of an InterleaveGroup for Range.Start. If it's an adjunct 6847 // member of the IG, do not construct any Recipe for it. 6848 const InterleaveGroup<Instruction> *IG = 6849 CM.getInterleavedAccessGroup(Instr); 6850 if (IG && Instr != IG->getInsertPos() && 6851 Range.Start >= 2 && // Query is illegal for VF == 1 6852 CM.getWideningDecision(Instr, Range.Start) == 6853 LoopVectorizationCostModel::CM_Interleave) { 6854 auto SinkCandidate = SinkAfterInverse.find(Instr); 6855 if (SinkCandidate != SinkAfterInverse.end()) 6856 Ingredients.push_back(SinkCandidate->second); 6857 continue; 6858 } 6859 6860 // Move instructions to handle first-order recurrences, step 1: avoid 6861 // handling this instruction until after we've handled the instruction it 6862 // should follow. 6863 auto SAIt = SinkAfter.find(Instr); 6864 if (SAIt != SinkAfter.end()) { 6865 LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" 6866 << *SAIt->second 6867 << " to vectorize a 1st order recurrence.\n"); 6868 SinkAfterInverse[SAIt->second] = Instr; 6869 continue; 6870 } 6871 6872 Ingredients.push_back(Instr); 6873 6874 // Move instructions to handle first-order recurrences, step 2: push the 6875 // instruction to be sunk at its insertion point. 6876 auto SAInvIt = SinkAfterInverse.find(Instr); 6877 if (SAInvIt != SinkAfterInverse.end()) 6878 Ingredients.push_back(SAInvIt->second); 6879 } 6880 6881 // Introduce each ingredient into VPlan. 6882 for (Instruction *Instr : Ingredients) { 6883 if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB)) 6884 continue; 6885 6886 // Otherwise, if all widening options failed, Instruction is to be 6887 // replicated. This may create a successor for VPBB. 6888 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication( 6889 Instr, Range, VPBB, PredInst2Recipe, Plan); 6890 if (NextVPBB != VPBB) { 6891 VPBB = NextVPBB; 6892 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 6893 : ""); 6894 } 6895 } 6896 } 6897 6898 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 6899 // may also be empty, such as the last one VPBB, reflecting original 6900 // basic-blocks with no recipes. 6901 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 6902 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 6903 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 6904 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 6905 delete PreEntry; 6906 6907 std::string PlanName; 6908 raw_string_ostream RSO(PlanName); 6909 unsigned VF = Range.Start; 6910 Plan->addVF(VF); 6911 RSO << "Initial VPlan for VF={" << VF; 6912 for (VF *= 2; VF < Range.End; VF *= 2) { 6913 Plan->addVF(VF); 6914 RSO << "," << VF; 6915 } 6916 RSO << "},UF>=1"; 6917 RSO.flush(); 6918 Plan->setName(PlanName); 6919 6920 return Plan; 6921 } 6922 6923 LoopVectorizationPlanner::VPlanPtr 6924 LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 6925 // Outer loop handling: They may require CFG and instruction level 6926 // transformations before even evaluating whether vectorization is profitable. 6927 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6928 // the vectorization pipeline. 6929 assert(!OrigLoop->empty()); 6930 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6931 6932 // Create new empty VPlan 6933 auto Plan = llvm::make_unique<VPlan>(); 6934 6935 // Build hierarchical CFG 6936 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 6937 HCFGBuilder.buildHierarchicalCFG(); 6938 6939 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 6940 Plan->addVF(VF); 6941 6942 if (EnableVPlanPredication) { 6943 VPlanPredicator VPP(*Plan); 6944 VPP.predicate(); 6945 6946 // Avoid running transformation to recipes until masked code generation in 6947 // VPlan-native path is in place. 6948 return Plan; 6949 } 6950 6951 SmallPtrSet<Instruction *, 1> DeadInstructions; 6952 VPlanHCFGTransforms::VPInstructionsToVPRecipes( 6953 Plan, Legal->getInductionVars(), DeadInstructions); 6954 6955 return Plan; 6956 } 6957 6958 Value* LoopVectorizationPlanner::VPCallbackILV:: 6959 getOrCreateVectorValues(Value *V, unsigned Part) { 6960 return ILV.getOrCreateVectorValue(V, Part); 6961 } 6962 6963 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 6964 O << " +\n" 6965 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 6966 IG->getInsertPos()->printAsOperand(O, false); 6967 if (User) { 6968 O << ", "; 6969 User->getOperand(0)->printAsOperand(O); 6970 } 6971 O << "\\l\""; 6972 for (unsigned i = 0; i < IG->getFactor(); ++i) 6973 if (Instruction *I = IG->getMember(i)) 6974 O << " +\n" 6975 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 6976 } 6977 6978 void VPWidenRecipe::execute(VPTransformState &State) { 6979 for (auto &Instr : make_range(Begin, End)) 6980 State.ILV->widenInstruction(Instr); 6981 } 6982 6983 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 6984 assert(!State.Instance && "Int or FP induction being replicated."); 6985 State.ILV->widenIntOrFpInduction(IV, Trunc); 6986 } 6987 6988 void VPWidenPHIRecipe::execute(VPTransformState &State) { 6989 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 6990 } 6991 6992 void VPBlendRecipe::execute(VPTransformState &State) { 6993 State.ILV->setDebugLocFromInst(State.Builder, Phi); 6994 // We know that all PHIs in non-header blocks are converted into 6995 // selects, so we don't have to worry about the insertion order and we 6996 // can just use the builder. 6997 // At this point we generate the predication tree. There may be 6998 // duplications since this is a simple recursive scan, but future 6999 // optimizations will clean it up. 7000 7001 unsigned NumIncoming = Phi->getNumIncomingValues(); 7002 7003 assert((User || NumIncoming == 1) && 7004 "Multiple predecessors with predecessors having a full mask"); 7005 // Generate a sequence of selects of the form: 7006 // SELECT(Mask3, In3, 7007 // SELECT(Mask2, In2, 7008 // ( ...))) 7009 InnerLoopVectorizer::VectorParts Entry(State.UF); 7010 for (unsigned In = 0; In < NumIncoming; ++In) { 7011 for (unsigned Part = 0; Part < State.UF; ++Part) { 7012 // We might have single edge PHIs (blocks) - use an identity 7013 // 'select' for the first PHI operand. 7014 Value *In0 = 7015 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 7016 if (In == 0) 7017 Entry[Part] = In0; // Initialize with the first incoming value. 7018 else { 7019 // Select between the current value and the previous incoming edge 7020 // based on the incoming mask. 7021 Value *Cond = State.get(User->getOperand(In), Part); 7022 Entry[Part] = 7023 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 7024 } 7025 } 7026 } 7027 for (unsigned Part = 0; Part < State.UF; ++Part) 7028 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 7029 } 7030 7031 void VPInterleaveRecipe::execute(VPTransformState &State) { 7032 assert(!State.Instance && "Interleave group being replicated."); 7033 if (!User) 7034 return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos()); 7035 7036 // Last (and currently only) operand is a mask. 7037 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 7038 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 7039 for (unsigned Part = 0; Part < State.UF; ++Part) 7040 MaskValues[Part] = State.get(Mask, Part); 7041 State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues); 7042 } 7043 7044 void VPReplicateRecipe::execute(VPTransformState &State) { 7045 if (State.Instance) { // Generate a single instance. 7046 State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated); 7047 // Insert scalar instance packing it into a vector. 7048 if (AlsoPack && State.VF > 1) { 7049 // If we're constructing lane 0, initialize to start from undef. 7050 if (State.Instance->Lane == 0) { 7051 Value *Undef = 7052 UndefValue::get(VectorType::get(Ingredient->getType(), State.VF)); 7053 State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef); 7054 } 7055 State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance); 7056 } 7057 return; 7058 } 7059 7060 // Generate scalar instances for all VF lanes of all UF parts, unless the 7061 // instruction is uniform inwhich case generate only the first lane for each 7062 // of the UF parts. 7063 unsigned EndLane = IsUniform ? 1 : State.VF; 7064 for (unsigned Part = 0; Part < State.UF; ++Part) 7065 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 7066 State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated); 7067 } 7068 7069 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 7070 assert(State.Instance && "Branch on Mask works only on single instance."); 7071 7072 unsigned Part = State.Instance->Part; 7073 unsigned Lane = State.Instance->Lane; 7074 7075 Value *ConditionBit = nullptr; 7076 if (!User) // Block in mask is all-one. 7077 ConditionBit = State.Builder.getTrue(); 7078 else { 7079 VPValue *BlockInMask = User->getOperand(0); 7080 ConditionBit = State.get(BlockInMask, Part); 7081 if (ConditionBit->getType()->isVectorTy()) 7082 ConditionBit = State.Builder.CreateExtractElement( 7083 ConditionBit, State.Builder.getInt32(Lane)); 7084 } 7085 7086 // Replace the temporary unreachable terminator with a new conditional branch, 7087 // whose two destinations will be set later when they are created. 7088 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 7089 assert(isa<UnreachableInst>(CurrentTerminator) && 7090 "Expected to replace unreachable terminator with conditional branch."); 7091 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 7092 CondBr->setSuccessor(0, nullptr); 7093 ReplaceInstWithInst(CurrentTerminator, CondBr); 7094 } 7095 7096 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 7097 assert(State.Instance && "Predicated instruction PHI works per instance."); 7098 Instruction *ScalarPredInst = cast<Instruction>( 7099 State.ValueMap.getScalarValue(PredInst, *State.Instance)); 7100 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 7101 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 7102 assert(PredicatingBB && "Predicated block has no single predecessor."); 7103 7104 // By current pack/unpack logic we need to generate only a single phi node: if 7105 // a vector value for the predicated instruction exists at this point it means 7106 // the instruction has vector users only, and a phi for the vector value is 7107 // needed. In this case the recipe of the predicated instruction is marked to 7108 // also do that packing, thereby "hoisting" the insert-element sequence. 7109 // Otherwise, a phi node for the scalar value is needed. 7110 unsigned Part = State.Instance->Part; 7111 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 7112 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 7113 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 7114 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 7115 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 7116 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 7117 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 7118 } else { 7119 Type *PredInstType = PredInst->getType(); 7120 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 7121 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 7122 Phi->addIncoming(ScalarPredInst, PredicatedBB); 7123 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 7124 } 7125 } 7126 7127 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 7128 if (!User) 7129 return State.ILV->vectorizeMemoryInstruction(&Instr); 7130 7131 // Last (and currently only) operand is a mask. 7132 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 7133 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 7134 for (unsigned Part = 0; Part < State.UF; ++Part) 7135 MaskValues[Part] = State.get(Mask, Part); 7136 State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues); 7137 } 7138 7139 // Process the loop in the VPlan-native vectorization path. This path builds 7140 // VPlan upfront in the vectorization pipeline, which allows to apply 7141 // VPlan-to-VPlan transformations from the very beginning without modifying the 7142 // input LLVM IR. 7143 static bool processLoopInVPlanNativePath( 7144 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 7145 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 7146 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 7147 OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) { 7148 7149 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 7150 Function *F = L->getHeader()->getParent(); 7151 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 7152 LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 7153 &Hints, IAI); 7154 // Use the planner for outer loop vectorization. 7155 // TODO: CM is not used at this point inside the planner. Turn CM into an 7156 // optional argument if we don't need it in the future. 7157 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM); 7158 7159 // Get user vectorization factor. 7160 const unsigned UserVF = Hints.getWidth(); 7161 7162 // Check the function attributes to find out if this function should be 7163 // optimized for size. 7164 bool OptForSize = 7165 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 7166 7167 // Plan how to best vectorize, return the best VF and its cost. 7168 const VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF); 7169 7170 // If we are stress testing VPlan builds, do not attempt to generate vector 7171 // code. Masked vector code generation support will follow soon. 7172 // Also, do not attempt to vectorize if no vector code will be produced. 7173 if (VPlanBuildStressTest || EnableVPlanPredication || 7174 VectorizationFactor::Disabled() == VF) 7175 return false; 7176 7177 LVP.setBestPlan(VF.Width, 1); 7178 7179 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 7180 &CM); 7181 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 7182 << L->getHeader()->getParent()->getName() << "\"\n"); 7183 LVP.executePlan(LB, DT); 7184 7185 // Mark the loop as already vectorized to avoid vectorizing again. 7186 Hints.setAlreadyVectorized(); 7187 7188 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7189 return true; 7190 } 7191 7192 bool LoopVectorizePass::processLoop(Loop *L) { 7193 assert((EnableVPlanNativePath || L->empty()) && 7194 "VPlan-native path is not enabled. Only process inner loops."); 7195 7196 #ifndef NDEBUG 7197 const std::string DebugLocStr = getDebugLocString(L); 7198 #endif /* NDEBUG */ 7199 7200 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 7201 << L->getHeader()->getParent()->getName() << "\" from " 7202 << DebugLocStr << "\n"); 7203 7204 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 7205 7206 LLVM_DEBUG( 7207 dbgs() << "LV: Loop hints:" 7208 << " force=" 7209 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 7210 ? "disabled" 7211 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 7212 ? "enabled" 7213 : "?")) 7214 << " width=" << Hints.getWidth() 7215 << " unroll=" << Hints.getInterleave() << "\n"); 7216 7217 // Function containing loop 7218 Function *F = L->getHeader()->getParent(); 7219 7220 // Looking at the diagnostic output is the only way to determine if a loop 7221 // was vectorized (other than looking at the IR or machine code), so it 7222 // is important to generate an optimization remark for each loop. Most of 7223 // these messages are generated as OptimizationRemarkAnalysis. Remarks 7224 // generated as OptimizationRemark and OptimizationRemarkMissed are 7225 // less verbose reporting vectorized loops and unvectorized loops that may 7226 // benefit from vectorization, respectively. 7227 7228 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 7229 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 7230 return false; 7231 } 7232 7233 PredicatedScalarEvolution PSE(*SE, *L); 7234 7235 // Check if it is legal to vectorize the loop. 7236 LoopVectorizationRequirements Requirements(*ORE); 7237 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE, 7238 &Requirements, &Hints, DB, AC); 7239 if (!LVL.canVectorize(EnableVPlanNativePath)) { 7240 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 7241 Hints.emitRemarkWithHints(); 7242 return false; 7243 } 7244 7245 // Check the function attributes to find out if this function should be 7246 // optimized for size. 7247 bool OptForSize = 7248 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 7249 7250 // Entrance to the VPlan-native vectorization path. Outer loops are processed 7251 // here. They may require CFG and instruction level transformations before 7252 // even evaluating whether vectorization is profitable. Since we cannot modify 7253 // the incoming IR, we need to build VPlan upfront in the vectorization 7254 // pipeline. 7255 if (!L->empty()) 7256 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 7257 ORE, Hints); 7258 7259 assert(L->empty() && "Inner loop expected."); 7260 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 7261 // count by optimizing for size, to minimize overheads. 7262 // Prefer constant trip counts over profile data, over upper bound estimate. 7263 unsigned ExpectedTC = 0; 7264 bool HasExpectedTC = false; 7265 if (const SCEVConstant *ConstExits = 7266 dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) { 7267 const APInt &ExitsCount = ConstExits->getAPInt(); 7268 // We are interested in small values for ExpectedTC. Skip over those that 7269 // can't fit an unsigned. 7270 if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) { 7271 ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1; 7272 HasExpectedTC = true; 7273 } 7274 } 7275 // ExpectedTC may be large because it's bound by a variable. Check 7276 // profiling information to validate we should vectorize. 7277 if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) { 7278 auto EstimatedTC = getLoopEstimatedTripCount(L); 7279 if (EstimatedTC) { 7280 ExpectedTC = *EstimatedTC; 7281 HasExpectedTC = true; 7282 } 7283 } 7284 if (!HasExpectedTC) { 7285 ExpectedTC = SE->getSmallConstantMaxTripCount(L); 7286 HasExpectedTC = (ExpectedTC > 0); 7287 } 7288 7289 if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) { 7290 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 7291 << "This loop is worth vectorizing only if no scalar " 7292 << "iteration overheads are incurred."); 7293 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 7294 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 7295 else { 7296 LLVM_DEBUG(dbgs() << "\n"); 7297 // Loops with a very small trip count are considered for vectorization 7298 // under OptForSize, thereby making sure the cost of their loop body is 7299 // dominant, free of runtime guards and scalar iteration overheads. 7300 OptForSize = true; 7301 } 7302 } 7303 7304 // Check the function attributes to see if implicit floats are allowed. 7305 // FIXME: This check doesn't seem possibly correct -- what if the loop is 7306 // an integer loop and the vector instructions selected are purely integer 7307 // vector instructions? 7308 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 7309 LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 7310 "attribute is used.\n"); 7311 ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), 7312 "NoImplicitFloat", L) 7313 << "loop not vectorized due to NoImplicitFloat attribute"); 7314 Hints.emitRemarkWithHints(); 7315 return false; 7316 } 7317 7318 // Check if the target supports potentially unsafe FP vectorization. 7319 // FIXME: Add a check for the type of safety issue (denormal, signaling) 7320 // for the target we're vectorizing for, to make sure none of the 7321 // additional fp-math flags can help. 7322 if (Hints.isPotentiallyUnsafe() && 7323 TTI->isFPVectorizationPotentiallyUnsafe()) { 7324 LLVM_DEBUG( 7325 dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); 7326 ORE->emit( 7327 createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L) 7328 << "loop not vectorized due to unsafe FP support."); 7329 Hints.emitRemarkWithHints(); 7330 return false; 7331 } 7332 7333 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 7334 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 7335 7336 // If an override option has been passed in for interleaved accesses, use it. 7337 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 7338 UseInterleaved = EnableInterleavedMemAccesses; 7339 7340 // Analyze interleaved memory accesses. 7341 if (UseInterleaved) { 7342 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 7343 } 7344 7345 // Use the cost model. 7346 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F, 7347 &Hints, IAI); 7348 CM.collectValuesToIgnore(); 7349 7350 // Use the planner for vectorization. 7351 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM); 7352 7353 // Get user vectorization factor. 7354 unsigned UserVF = Hints.getWidth(); 7355 7356 // Plan how to best vectorize, return the best VF and its cost. 7357 Optional<VectorizationFactor> MaybeVF = LVP.plan(OptForSize, UserVF); 7358 7359 VectorizationFactor VF = VectorizationFactor::Disabled(); 7360 unsigned IC = 1; 7361 unsigned UserIC = Hints.getInterleave(); 7362 7363 if (MaybeVF) { 7364 VF = *MaybeVF; 7365 // Select the interleave count. 7366 IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); 7367 } 7368 7369 // Identify the diagnostic messages that should be produced. 7370 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 7371 bool VectorizeLoop = true, InterleaveLoop = true; 7372 if (Requirements.doesNotMeet(F, L, Hints)) { 7373 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " 7374 "requirements.\n"); 7375 Hints.emitRemarkWithHints(); 7376 return false; 7377 } 7378 7379 if (VF.Width == 1) { 7380 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 7381 VecDiagMsg = std::make_pair( 7382 "VectorizationNotBeneficial", 7383 "the cost-model indicates that vectorization is not beneficial"); 7384 VectorizeLoop = false; 7385 } 7386 7387 if (!MaybeVF && UserIC > 1) { 7388 // Tell the user interleaving was avoided up-front, despite being explicitly 7389 // requested. 7390 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 7391 "interleaving should be avoided up front\n"); 7392 IntDiagMsg = std::make_pair( 7393 "InterleavingAvoided", 7394 "Ignoring UserIC, because interleaving was avoided up front"); 7395 InterleaveLoop = false; 7396 } else if (IC == 1 && UserIC <= 1) { 7397 // Tell the user interleaving is not beneficial. 7398 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 7399 IntDiagMsg = std::make_pair( 7400 "InterleavingNotBeneficial", 7401 "the cost-model indicates that interleaving is not beneficial"); 7402 InterleaveLoop = false; 7403 if (UserIC == 1) { 7404 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 7405 IntDiagMsg.second += 7406 " and is explicitly disabled or interleave count is set to 1"; 7407 } 7408 } else if (IC > 1 && UserIC == 1) { 7409 // Tell the user interleaving is beneficial, but it explicitly disabled. 7410 LLVM_DEBUG( 7411 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 7412 IntDiagMsg = std::make_pair( 7413 "InterleavingBeneficialButDisabled", 7414 "the cost-model indicates that interleaving is beneficial " 7415 "but is explicitly disabled or interleave count is set to 1"); 7416 InterleaveLoop = false; 7417 } 7418 7419 // Override IC if user provided an interleave count. 7420 IC = UserIC > 0 ? UserIC : IC; 7421 7422 // Emit diagnostic messages, if any. 7423 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 7424 if (!VectorizeLoop && !InterleaveLoop) { 7425 // Do not vectorize or interleaving the loop. 7426 ORE->emit([&]() { 7427 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 7428 L->getStartLoc(), L->getHeader()) 7429 << VecDiagMsg.second; 7430 }); 7431 ORE->emit([&]() { 7432 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 7433 L->getStartLoc(), L->getHeader()) 7434 << IntDiagMsg.second; 7435 }); 7436 return false; 7437 } else if (!VectorizeLoop && InterleaveLoop) { 7438 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 7439 ORE->emit([&]() { 7440 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 7441 L->getStartLoc(), L->getHeader()) 7442 << VecDiagMsg.second; 7443 }); 7444 } else if (VectorizeLoop && !InterleaveLoop) { 7445 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 7446 << ") in " << DebugLocStr << '\n'); 7447 ORE->emit([&]() { 7448 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 7449 L->getStartLoc(), L->getHeader()) 7450 << IntDiagMsg.second; 7451 }); 7452 } else if (VectorizeLoop && InterleaveLoop) { 7453 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 7454 << ") in " << DebugLocStr << '\n'); 7455 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 7456 } 7457 7458 LVP.setBestPlan(VF.Width, IC); 7459 7460 using namespace ore; 7461 bool DisableRuntimeUnroll = false; 7462 MDNode *OrigLoopID = L->getLoopID(); 7463 7464 if (!VectorizeLoop) { 7465 assert(IC > 1 && "interleave count should not be 1 or 0"); 7466 // If we decided that it is not legal to vectorize the loop, then 7467 // interleave it. 7468 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 7469 &CM); 7470 LVP.executePlan(Unroller, DT); 7471 7472 ORE->emit([&]() { 7473 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 7474 L->getHeader()) 7475 << "interleaved loop (interleaved count: " 7476 << NV("InterleaveCount", IC) << ")"; 7477 }); 7478 } else { 7479 // If we decided that it is *legal* to vectorize the loop, then do it. 7480 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 7481 &LVL, &CM); 7482 LVP.executePlan(LB, DT); 7483 ++LoopsVectorized; 7484 7485 // Add metadata to disable runtime unrolling a scalar loop when there are 7486 // no runtime checks about strides and memory. A scalar loop that is 7487 // rarely used is not worth unrolling. 7488 if (!LB.areSafetyChecksAdded()) 7489 DisableRuntimeUnroll = true; 7490 7491 // Report the vectorization decision. 7492 ORE->emit([&]() { 7493 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 7494 L->getHeader()) 7495 << "vectorized loop (vectorization width: " 7496 << NV("VectorizationFactor", VF.Width) 7497 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 7498 }); 7499 } 7500 7501 Optional<MDNode *> RemainderLoopID = 7502 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7503 LLVMLoopVectorizeFollowupEpilogue}); 7504 if (RemainderLoopID.hasValue()) { 7505 L->setLoopID(RemainderLoopID.getValue()); 7506 } else { 7507 if (DisableRuntimeUnroll) 7508 AddRuntimeUnrollDisableMetaData(L); 7509 7510 // Mark the loop as already vectorized to avoid vectorizing again. 7511 Hints.setAlreadyVectorized(); 7512 } 7513 7514 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7515 return true; 7516 } 7517 7518 bool LoopVectorizePass::runImpl( 7519 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 7520 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 7521 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 7522 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 7523 OptimizationRemarkEmitter &ORE_) { 7524 SE = &SE_; 7525 LI = &LI_; 7526 TTI = &TTI_; 7527 DT = &DT_; 7528 BFI = &BFI_; 7529 TLI = TLI_; 7530 AA = &AA_; 7531 AC = &AC_; 7532 GetLAA = &GetLAA_; 7533 DB = &DB_; 7534 ORE = &ORE_; 7535 7536 // Don't attempt if 7537 // 1. the target claims to have no vector registers, and 7538 // 2. interleaving won't help ILP. 7539 // 7540 // The second condition is necessary because, even if the target has no 7541 // vector registers, loop vectorization may still enable scalar 7542 // interleaving. 7543 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 7544 return false; 7545 7546 bool Changed = false; 7547 7548 // The vectorizer requires loops to be in simplified form. 7549 // Since simplification may add new inner loops, it has to run before the 7550 // legality and profitability checks. This means running the loop vectorizer 7551 // will simplify all loops, regardless of whether anything end up being 7552 // vectorized. 7553 for (auto &L : *LI) 7554 Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */); 7555 7556 // Build up a worklist of inner-loops to vectorize. This is necessary as 7557 // the act of vectorizing or partially unrolling a loop creates new loops 7558 // and can invalidate iterators across the loops. 7559 SmallVector<Loop *, 8> Worklist; 7560 7561 for (Loop *L : *LI) 7562 collectSupportedLoops(*L, LI, ORE, Worklist); 7563 7564 LoopsAnalyzed += Worklist.size(); 7565 7566 // Now walk the identified inner loops. 7567 while (!Worklist.empty()) { 7568 Loop *L = Worklist.pop_back_val(); 7569 7570 // For the inner loops we actually process, form LCSSA to simplify the 7571 // transform. 7572 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 7573 7574 Changed |= processLoop(L); 7575 } 7576 7577 // Process each loop nest in the function. 7578 return Changed; 7579 } 7580 7581 PreservedAnalyses LoopVectorizePass::run(Function &F, 7582 FunctionAnalysisManager &AM) { 7583 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 7584 auto &LI = AM.getResult<LoopAnalysis>(F); 7585 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 7586 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 7587 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 7588 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 7589 auto &AA = AM.getResult<AAManager>(F); 7590 auto &AC = AM.getResult<AssumptionAnalysis>(F); 7591 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 7592 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7593 MemorySSA *MSSA = EnableMSSALoopDependency 7594 ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() 7595 : nullptr; 7596 7597 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 7598 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 7599 [&](Loop &L) -> const LoopAccessInfo & { 7600 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA}; 7601 return LAM.getResult<LoopAccessAnalysis>(L, AR); 7602 }; 7603 bool Changed = 7604 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE); 7605 if (!Changed) 7606 return PreservedAnalyses::all(); 7607 PreservedAnalyses PA; 7608 7609 // We currently do not preserve loopinfo/dominator analyses with outer loop 7610 // vectorization. Until this is addressed, mark these analyses as preserved 7611 // only for non-VPlan-native path. 7612 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 7613 if (!EnableVPlanNativePath) { 7614 PA.preserve<LoopAnalysis>(); 7615 PA.preserve<DominatorTreeAnalysis>(); 7616 } 7617 PA.preserve<BasicAA>(); 7618 PA.preserve<GlobalsAA>(); 7619 return PA; 7620 } 7621