//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
//  1. The main loop pass that drives the different parts.
//  2. LoopVectorizationLegality - A unit that checks for the legality
//     of the vectorization.
//  3. InnerLoopVectorizer - A unit that performs the actual
//     widening of instructions.
//  4. LoopVectorizationCostModel - A unit that checks for the profitability
//     of vectorization. It decides on the optimal vector width, which
//     can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
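// As a rough illustration of the transformation (not the exact IR that is
// emitted), a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// is, for a vectorization factor of 4, turned into a loop whose body loads,
// adds and stores four consecutive elements at a time and whose induction
// variable is incremented by 4. The vectorizer may additionally emit runtime
// checks and a scalar epilogue loop for the remaining iterations.
//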
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanHCFGTransforms.h"
#include "VPlanPredicator.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
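
// For example, ToVectorTy(i32, 4) produces the IR type <4 x i32>, while a VF
// of 1 or a void type returns the scalar type unchanged.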

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
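
// As an illustration (assuming a typical data layout): i32 with VF = 4 is
// regular, since 4 * 4 bytes equals the 16-byte store size of <4 x i32>,
// whereas i1 with VF = 8 is irregular, because eight separately allocated i1
// values occupy 8 bytes while <8 x i1> is stored in a single byte.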

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);
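
  // As a conceptual sketch: for a canonical integer induction with step 1 and
  // VF = 4, UF = 1, each vector iteration sees the widened induction value
  // <iv, iv+1, iv+2, iv+3>, and the scalar induction update becomes
  // iv.next = iv + 4. The exact IR also depends on the induction descriptor
  // and on any truncation requested via the optional Trunc argument above.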

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to,
  /// optionally masking the vector operations if \p BlockInMask is non-null.
  void vectorizeInterleaveGroup(Instruction *Instr,
                                VectorParts *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE;
       ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization should be avoided up front.
  Optional<unsigned> computeMaxVF(bool OptForSize);

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; its
  /// form after vectorization depends on cost. This function takes cost-based
  /// decisions for Load/Store instructions and collects them in a map. This
  /// decision map is used for building the lists of loop-uniform and
  /// loop-scalar instructions. The calculated cost is saved with the widening
  /// decision in order to avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;

    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }
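
  // Informally, for a loop over i: a load of a[i] is typically recorded as
  // CM_Widen (consecutive, stride +1), a load of a[n - i] as CM_Widen_Reverse,
  // paired accesses such as a[2*i] and a[2*i+1] may be combined under
  // CM_Interleave, and an access through an unknown pointer may end up as
  // CM_GatherScatter or CM_Scalarize depending on target support and cost.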

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI.isLegalMaskedScatter(DataType);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI.isLegalMaskedGather(DataType);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if \p I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I, unsigned VF = 1);

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps, and there is nothing preventing us from
  /// creating a scalar epilogue.
  bool requiresScalarEpilogue() const {
    return IsScalarEpilogueAllowed && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed; it may be disallowed, e.g.,
  /// due to optsize.
  bool isScalarEpilogueAllowed() const { return IsScalarEpilogueAllowed; }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load/Store instruction \p I with uniform pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  bool IsScalarEpilogueAllowed = true;

  /// All blocks of loop are to be masked to fold tail of scalar iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              unsigned VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(unsigned VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(unsigned VF);

  /// Keeps cost model vectorization decision and cost for instructions.
1299 /// Right now it is used for memory instructions only. 1300 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1301 std::pair<InstWidening, unsigned>>; 1302 1303 DecisionList WideningDecisions; 1304 1305 public: 1306 /// The loop that we evaluate. 1307 Loop *TheLoop; 1308 1309 /// Predicated scalar evolution analysis. 1310 PredicatedScalarEvolution &PSE; 1311 1312 /// Loop Info analysis. 1313 LoopInfo *LI; 1314 1315 /// Vectorization legality. 1316 LoopVectorizationLegality *Legal; 1317 1318 /// Vector target information. 1319 const TargetTransformInfo &TTI; 1320 1321 /// Target Library Info. 1322 const TargetLibraryInfo *TLI; 1323 1324 /// Demanded bits analysis. 1325 DemandedBits *DB; 1326 1327 /// Assumption cache. 1328 AssumptionCache *AC; 1329 1330 /// Interface to emit optimization remarks. 1331 OptimizationRemarkEmitter *ORE; 1332 1333 const Function *TheFunction; 1334 1335 /// Loop Vectorize Hint. 1336 const LoopVectorizeHints *Hints; 1337 1338 /// The interleave access information contains groups of interleaved accesses 1339 /// with the same stride and close to each other. 1340 InterleavedAccessInfo &InterleaveInfo; 1341 1342 /// Values to ignore in the cost model. 1343 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1344 1345 /// Values to ignore in the cost model when VF > 1. 1346 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1347 }; 1348 1349 } // end namespace llvm 1350 1351 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1352 // vectorization. The loop needs to be annotated with #pragma omp simd 1353 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1354 // vector length information is not provided, vectorization is not considered 1355 // explicit. Interleave hints are not allowed either. These limitations will be 1356 // relaxed in the future. 1357 // Please, note that we are currently forced to abuse the pragma 'clang 1358 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1359 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1360 // provides *explicit vectorization hints* (LV can bypass legal checks and 1361 // assume that vectorization is legal). However, both hints are implemented 1362 // using the same metadata (llvm.loop.vectorize, processed by 1363 // LoopVectorizeHints). This will be fixed in the future when the native IR 1364 // representation for pragma 'omp simd' is introduced. 1365 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1366 OptimizationRemarkEmitter *ORE) { 1367 assert(!OuterLp->empty() && "This is not an outer loop"); 1368 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1369 1370 // Only outer loops with an explicit vectorization hint are supported. 1371 // Unannotated outer loops are ignored. 1372 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1373 return false; 1374 1375 Function *Fn = OuterLp->getHeader()->getParent(); 1376 if (!Hints.allowVectorization(Fn, OuterLp, 1377 true /*VectorizeOnlyWhenForced*/)) { 1378 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1379 return false; 1380 } 1381 1382 if (!Hints.getWidth()) { 1383 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n"); 1384 Hints.emitRemarkWithHints(); 1385 return false; 1386 } 1387 1388 if (Hints.getInterleave() > 1) { 1389 // TODO: Interleave support is future work. 
1390 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1391 "outer loops.\n"); 1392 Hints.emitRemarkWithHints(); 1393 return false; 1394 } 1395 1396 return true; 1397 } 1398 1399 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1400 OptimizationRemarkEmitter *ORE, 1401 SmallVectorImpl<Loop *> &V) { 1402 // Collect inner loops and outer loops without irreducible control flow. For 1403 // now, only collect outer loops that have explicit vectorization hints. If we 1404 // are stress testing the VPlan H-CFG construction, we collect the outermost 1405 // loop of every loop nest. 1406 if (L.empty() || VPlanBuildStressTest || 1407 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1408 LoopBlocksRPO RPOT(&L); 1409 RPOT.perform(LI); 1410 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1411 V.push_back(&L); 1412 // TODO: Collect inner loops inside marked outer loops in case 1413 // vectorization fails for the outer loop. Do not invoke 1414 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1415 // already known to be reducible. We can use an inherited attribute for 1416 // that. 1417 return; 1418 } 1419 } 1420 for (Loop *InnerL : L) 1421 collectSupportedLoops(*InnerL, LI, ORE, V); 1422 } 1423 1424 namespace { 1425 1426 /// The LoopVectorize Pass. 1427 struct LoopVectorize : public FunctionPass { 1428 /// Pass identification, replacement for typeid 1429 static char ID; 1430 1431 LoopVectorizePass Impl; 1432 1433 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1434 bool VectorizeOnlyWhenForced = false) 1435 : FunctionPass(ID) { 1436 Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced; 1437 Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced; 1438 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1439 } 1440 1441 bool runOnFunction(Function &F) override { 1442 if (skipFunction(F)) 1443 return false; 1444 1445 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1446 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1447 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1448 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1449 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1450 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1451 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 1452 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1453 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1454 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1455 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1456 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1457 1458 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1459 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1460 1461 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1462 GetLAA, *ORE); 1463 } 1464 1465 void getAnalysisUsage(AnalysisUsage &AU) const override { 1466 AU.addRequired<AssumptionCacheTracker>(); 1467 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1468 AU.addRequired<DominatorTreeWrapperPass>(); 1469 AU.addRequired<LoopInfoWrapperPass>(); 1470 AU.addRequired<ScalarEvolutionWrapperPass>(); 1471 AU.addRequired<TargetTransformInfoWrapperPass>(); 1472 AU.addRequired<AAResultsWrapperPass>(); 1473 AU.addRequired<LoopAccessLegacyAnalysis>(); 1474 AU.addRequired<DemandedBitsWrapperPass>(); 1475 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1476 1477 // We currently do not preserve loopinfo/dominator analyses with outer loop 1478 // vectorization. Until this is addressed, mark these analyses as preserved 1479 // only for non-VPlan-native path. 1480 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 1481 if (!EnableVPlanNativePath) { 1482 AU.addPreserved<LoopInfoWrapperPass>(); 1483 AU.addPreserved<DominatorTreeWrapperPass>(); 1484 } 1485 1486 AU.addPreserved<BasicAAWrapperPass>(); 1487 AU.addPreserved<GlobalsAAWrapperPass>(); 1488 } 1489 }; 1490 1491 } // end anonymous namespace 1492 1493 //===----------------------------------------------------------------------===// 1494 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1495 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1496 //===----------------------------------------------------------------------===// 1497 1498 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1499 // We need to place the broadcast of invariant variables outside the loop, 1500 // but only if it's proven safe to do so. Else, broadcast will be inside 1501 // vector loop body. 1502 Instruction *Instr = dyn_cast<Instruction>(V); 1503 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1504 (!Instr || 1505 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1506 // Place the code for broadcasting invariant variables in the new preheader. 1507 IRBuilder<>::InsertPointGuard Guard(Builder); 1508 if (SafeToHoist) 1509 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1510 1511 // Broadcast the scalar into all locations in the vector. 
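// (Illustrative note.) For a scalar i32 %v and VF = 4, CreateVectorSplat below
// builds an insertelement of %v into lane 0 of an undef vector followed by a
// shufflevector with an all-zero mask, yielding a <4 x i32> whose lanes all
// equal %v.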
1512 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1513 1514 return Shuf; 1515 } 1516 1517 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1518 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1519 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1520 "Expected either an induction phi-node or a truncate of it!"); 1521 Value *Start = II.getStartValue(); 1522 1523 // Construct the initial value of the vector IV in the vector loop preheader 1524 auto CurrIP = Builder.saveIP(); 1525 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1526 if (isa<TruncInst>(EntryVal)) { 1527 assert(Start->getType()->isIntegerTy() && 1528 "Truncation requires an integer type"); 1529 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1530 Step = Builder.CreateTrunc(Step, TruncType); 1531 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1532 } 1533 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1534 Value *SteppedStart = 1535 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1536 1537 // We create vector phi nodes for both integer and floating-point induction 1538 // variables. Here, we determine the kind of arithmetic we will perform. 1539 Instruction::BinaryOps AddOp; 1540 Instruction::BinaryOps MulOp; 1541 if (Step->getType()->isIntegerTy()) { 1542 AddOp = Instruction::Add; 1543 MulOp = Instruction::Mul; 1544 } else { 1545 AddOp = II.getInductionOpcode(); 1546 MulOp = Instruction::FMul; 1547 } 1548 1549 // Multiply the vectorization factor by the step using integer or 1550 // floating-point arithmetic as appropriate. 1551 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1552 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1553 1554 // Create a vector splat to use in the induction update. 1555 // 1556 // FIXME: If the step is non-constant, we create the vector splat with 1557 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1558 // handle a constant vector splat. 1559 Value *SplatVF = isa<Constant>(Mul) 1560 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 1561 : Builder.CreateVectorSplat(VF, Mul); 1562 Builder.restoreIP(CurrIP); 1563 1564 // We may need to add the step a number of times, depending on the unroll 1565 // factor. The last of those goes into the PHI. 1566 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1567 &*LoopVectorBody->getFirstInsertionPt()); 1568 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1569 Instruction *LastInduction = VecInd; 1570 for (unsigned Part = 0; Part < UF; ++Part) { 1571 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1572 1573 if (isa<TruncInst>(EntryVal)) 1574 addMetadata(LastInduction, EntryVal); 1575 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1576 1577 LastInduction = cast<Instruction>(addFastMathFlag( 1578 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1579 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1580 } 1581 1582 // Move the last step to the end of the latch block. This ensures consistent 1583 // placement of all induction updates. 
1584 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1585 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1586 auto *ICmp = cast<Instruction>(Br->getCondition()); 1587 LastInduction->moveBefore(ICmp); 1588 LastInduction->setName("vec.ind.next"); 1589 1590 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1591 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1592 } 1593 1594 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1595 return Cost->isScalarAfterVectorization(I, VF) || 1596 Cost->isProfitableToScalarize(I, VF); 1597 } 1598 1599 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1600 if (shouldScalarizeInstruction(IV)) 1601 return true; 1602 auto isScalarInst = [&](User *U) -> bool { 1603 auto *I = cast<Instruction>(U); 1604 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1605 }; 1606 return llvm::any_of(IV->users(), isScalarInst); 1607 } 1608 1609 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1610 const InductionDescriptor &ID, const Instruction *EntryVal, 1611 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1612 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1613 "Expected either an induction phi-node or a truncate of it!"); 1614 1615 // This induction variable is not the phi from the original loop but the 1616 // newly-created IV based on the proof that casted Phi is equal to the 1617 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1618 // re-uses the same InductionDescriptor that original IV uses but we don't 1619 // have to do any recording in this case - that is done when original IV is 1620 // processed. 1621 if (isa<TruncInst>(EntryVal)) 1622 return; 1623 1624 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1625 if (Casts.empty()) 1626 return; 1627 // Only the first Cast instruction in the Casts vector is of interest. 1628 // The rest of the Casts (if exist) have no uses outside the 1629 // induction update chain itself. 1630 Instruction *CastInst = *Casts.begin(); 1631 if (Lane < UINT_MAX) 1632 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1633 else 1634 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1635 } 1636 1637 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1638 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1639 "Primary induction variable must have an integer type"); 1640 1641 auto II = Legal->getInductionVars()->find(IV); 1642 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1643 1644 auto ID = II->second; 1645 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1646 1647 // The scalar value to broadcast. This will be derived from the canonical 1648 // induction variable. 1649 Value *ScalarIV = nullptr; 1650 1651 // The value from the original loop to which we are mapping the new induction 1652 // variable. 1653 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1654 1655 // True if we have vectorized the induction variable. 1656 auto VectorizedIV = false; 1657 1658 // Determine if we want a scalar version of the induction variable. This is 1659 // true if the induction variable itself is not widened, or if it has at 1660 // least one user in the loop that is not widened. 1661 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 1662 1663 // Generate code for the induction step. 
Note that induction steps are 1664 // required to be loop-invariant 1665 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 1666 "Induction step should be loop invariant"); 1667 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1668 Value *Step = nullptr; 1669 if (PSE.getSE()->isSCEVable(IV->getType())) { 1670 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1671 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1672 LoopVectorPreHeader->getTerminator()); 1673 } else { 1674 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 1675 } 1676 1677 // Try to create a new independent vector induction variable. If we can't 1678 // create the phi node, we will splat the scalar induction variable in each 1679 // loop iteration. 1680 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 1681 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1682 VectorizedIV = true; 1683 } 1684 1685 // If we haven't yet vectorized the induction variable, or if we will create 1686 // a scalar one, we need to define the scalar induction variable and step 1687 // values. If we were given a truncation type, truncate the canonical 1688 // induction variable and step. Otherwise, derive these values from the 1689 // induction descriptor. 1690 if (!VectorizedIV || NeedsScalarIV) { 1691 ScalarIV = Induction; 1692 if (IV != OldInduction) { 1693 ScalarIV = IV->getType()->isIntegerTy() 1694 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1695 : Builder.CreateCast(Instruction::SIToFP, Induction, 1696 IV->getType()); 1697 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1698 ScalarIV->setName("offset.idx"); 1699 } 1700 if (Trunc) { 1701 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1702 assert(Step->getType()->isIntegerTy() && 1703 "Truncation requires an integer step"); 1704 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1705 Step = Builder.CreateTrunc(Step, TruncType); 1706 } 1707 } 1708 1709 // If we haven't yet vectorized the induction variable, splat the scalar 1710 // induction variable, and build the necessary step vectors. 1711 // TODO: Don't do it unless the vectorized IV is really required. 1712 if (!VectorizedIV) { 1713 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1714 for (unsigned Part = 0; Part < UF; ++Part) { 1715 Value *EntryPart = 1716 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1717 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1718 if (Trunc) 1719 addMetadata(EntryPart, Trunc); 1720 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1721 } 1722 } 1723 1724 // If an induction variable is only used for counting loop iterations or 1725 // calculating addresses, it doesn't need to be widened. Create scalar steps 1726 // that can be used by instructions we will later scalarize. Note that the 1727 // addition of the scalar steps will not increase the number of instructions 1728 // in the loop in the common case prior to InstCombine. We will be trading 1729 // one vector extract for each scalar step. 1730 if (NeedsScalarIV) 1731 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1732 } 1733 1734 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1735 Instruction::BinaryOps BinOp) { 1736 // Create and check the types. 
1737 assert(Val->getType()->isVectorTy() && "Must be a vector"); 1738 int VLen = Val->getType()->getVectorNumElements(); 1739 1740 Type *STy = Val->getType()->getScalarType(); 1741 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1742 "Induction Step must be an integer or FP"); 1743 assert(Step->getType() == STy && "Step has wrong type"); 1744 1745 SmallVector<Constant *, 8> Indices; 1746 1747 if (STy->isIntegerTy()) { 1748 // Create a vector of consecutive numbers from zero to VF. 1749 for (int i = 0; i < VLen; ++i) 1750 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1751 1752 // Add the consecutive indices to the vector value. 1753 Constant *Cv = ConstantVector::get(Indices); 1754 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1755 Step = Builder.CreateVectorSplat(VLen, Step); 1756 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1757 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1758 // which can be found from the original scalar operations. 1759 Step = Builder.CreateMul(Cv, Step); 1760 return Builder.CreateAdd(Val, Step, "induction"); 1761 } 1762 1763 // Floating point induction. 1764 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1765 "Binary Opcode should be specified for FP induction"); 1766 // Create a vector of consecutive numbers from zero to VF. 1767 for (int i = 0; i < VLen; ++i) 1768 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1769 1770 // Add the consecutive indices to the vector value. 1771 Constant *Cv = ConstantVector::get(Indices); 1772 1773 Step = Builder.CreateVectorSplat(VLen, Step); 1774 1775 // Floating point operations had to be 'fast' to enable the induction. 1776 FastMathFlags Flags; 1777 Flags.setFast(); 1778 1779 Value *MulOp = Builder.CreateFMul(Cv, Step); 1780 if (isa<Instruction>(MulOp)) 1781 // Have to check, MulOp may be a constant 1782 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1783 1784 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1785 if (isa<Instruction>(BOp)) 1786 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1787 return BOp; 1788 } 1789 1790 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1791 Instruction *EntryVal, 1792 const InductionDescriptor &ID) { 1793 // We shouldn't have to build scalar steps if we aren't vectorizing. 1794 assert(VF > 1 && "VF should be greater than one"); 1795 1796 // Get the value type and ensure it and the step have the same integer type. 1797 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1798 assert(ScalarIVTy == Step->getType() && 1799 "Val and Step should have the same type"); 1800 1801 // We build scalar steps for both integer and floating-point induction 1802 // variables. Here, we determine the kind of arithmetic we will perform. 1803 Instruction::BinaryOps AddOp; 1804 Instruction::BinaryOps MulOp; 1805 if (ScalarIVTy->isIntegerTy()) { 1806 AddOp = Instruction::Add; 1807 MulOp = Instruction::Mul; 1808 } else { 1809 AddOp = ID.getInductionOpcode(); 1810 MulOp = Instruction::FMul; 1811 } 1812 1813 // Determine the number of scalars we need to generate for each unroll 1814 // iteration. If EntryVal is uniform, we only need to generate the first 1815 // lane. Otherwise, we generate all VF values. 1816 unsigned Lanes = 1817 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 1818 : VF; 1819 // Compute the scalar steps and save the results in VectorLoopValueMap. 
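// (Illustrative example.) With VF = 4, UF = 2 and an integer induction with
// step S, the loops below record ScalarIV + {0,1,2,3} * S for part 0 and
// ScalarIV + {4,5,6,7} * S for part 1; when EntryVal is uniform after
// vectorization, only lane 0 of each part is generated.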
1820 for (unsigned Part = 0; Part < UF; ++Part) { 1821 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 1822 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 1823 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 1824 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 1825 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 1826 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 1827 } 1828 } 1829 } 1830 1831 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 1832 assert(V != Induction && "The new induction variable should not be used."); 1833 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 1834 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1835 1836 // If we have a stride that is replaced by one, do it here. Defer this for 1837 // the VPlan-native path until we start running Legal checks in that path. 1838 if (!EnableVPlanNativePath && Legal->hasStride(V)) 1839 V = ConstantInt::get(V->getType(), 1); 1840 1841 // If we have a vector mapped to this value, return it. 1842 if (VectorLoopValueMap.hasVectorValue(V, Part)) 1843 return VectorLoopValueMap.getVectorValue(V, Part); 1844 1845 // If the value has not been vectorized, check if it has been scalarized 1846 // instead. If it has been scalarized, and we actually need the value in 1847 // vector form, we will construct the vector values on demand. 1848 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 1849 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 1850 1851 // If we've scalarized a value, that value should be an instruction. 1852 auto *I = cast<Instruction>(V); 1853 1854 // If we aren't vectorizing, we can just copy the scalar map values over to 1855 // the vector map. 1856 if (VF == 1) { 1857 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 1858 return ScalarValue; 1859 } 1860 1861 // Get the last scalar instruction we generated for V and Part. If the value 1862 // is known to be uniform after vectorization, this corresponds to lane zero 1863 // of the Part unroll iteration. Otherwise, the last instruction is the one 1864 // we created for the last vector lane of the Part unroll iteration. 1865 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 1866 auto *LastInst = cast<Instruction>( 1867 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 1868 1869 // Set the insert point after the last scalarized instruction. This ensures 1870 // the insertelement sequence will directly follow the scalar definitions. 1871 auto OldIP = Builder.saveIP(); 1872 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 1873 Builder.SetInsertPoint(&*NewIP); 1874 1875 // However, if we are vectorizing, we need to construct the vector values. 1876 // If the value is known to be uniform after vectorization, we can just 1877 // broadcast the scalar value corresponding to lane zero for each unroll 1878 // iteration. Otherwise, we construct the vector values using insertelement 1879 // instructions. Since the resulting vectors are stored in 1880 // VectorLoopValueMap, we will only generate the insertelements once. 1881 Value *VectorValue = nullptr; 1882 if (Cost->isUniformAfterVectorization(I, VF)) { 1883 VectorValue = getBroadcastInstrs(ScalarValue); 1884 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 1885 } else { 1886 // Initialize packing with insertelements to start from undef. 
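// (Illustrative note.) The map entry is seeded with an undef <VF x Ty> value,
// and packScalarIntoVectorValue then inserts each scalar lane value in turn,
// so later users see a fully populated vector.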
1887 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 1888 VectorLoopValueMap.setVectorValue(V, Part, Undef); 1889 for (unsigned Lane = 0; Lane < VF; ++Lane) 1890 packScalarIntoVectorValue(V, {Part, Lane}); 1891 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 1892 } 1893 Builder.restoreIP(OldIP); 1894 return VectorValue; 1895 } 1896 1897 // If this scalar is unknown, assume that it is a constant or that it is 1898 // loop invariant. Broadcast V and save the value for future uses. 1899 Value *B = getBroadcastInstrs(V); 1900 VectorLoopValueMap.setVectorValue(V, Part, B); 1901 return B; 1902 } 1903 1904 Value * 1905 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 1906 const VPIteration &Instance) { 1907 // If the value is not an instruction contained in the loop, it should 1908 // already be scalar. 1909 if (OrigLoop->isLoopInvariant(V)) 1910 return V; 1911 1912 assert(Instance.Lane > 0 1913 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 1914 : true && "Uniform values only have lane zero"); 1915 1916 // If the value from the original loop has not been vectorized, it is 1917 // represented by UF x VF scalar values in the new loop. Return the requested 1918 // scalar value. 1919 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 1920 return VectorLoopValueMap.getScalarValue(V, Instance); 1921 1922 // If the value has not been scalarized, get its entry in VectorLoopValueMap 1923 // for the given unroll part. If this entry is not a vector type (i.e., the 1924 // vectorization factor is one), there is no need to generate an 1925 // extractelement instruction. 1926 auto *U = getOrCreateVectorValue(V, Instance.Part); 1927 if (!U->getType()->isVectorTy()) { 1928 assert(VF == 1 && "Value not scalarized has non-vector type"); 1929 return U; 1930 } 1931 1932 // Otherwise, the value from the original loop has been vectorized and is 1933 // represented by UF vector values. Extract and return the requested scalar 1934 // value from the appropriate vector lane. 1935 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 1936 } 1937 1938 void InnerLoopVectorizer::packScalarIntoVectorValue( 1939 Value *V, const VPIteration &Instance) { 1940 assert(V != Induction && "The new induction variable should not be used."); 1941 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 1942 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1943 1944 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 1945 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 1946 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 1947 Builder.getInt32(Instance.Lane)); 1948 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 1949 } 1950 1951 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 1952 assert(Vec->getType()->isVectorTy() && "Invalid type"); 1953 SmallVector<Constant *, 8> ShuffleMask; 1954 for (unsigned i = 0; i < VF; ++i) 1955 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 1956 1957 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 1958 ConstantVector::get(ShuffleMask), 1959 "reverse"); 1960 } 1961 1962 // Return whether we allow using masked interleave-groups (for dealing with 1963 // strided loads/stores that reside in predicated blocks, or for dealing 1964 // with gaps). 
1965 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 1966 // If an override option has been passed in for interleaved accesses, use it. 1967 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 1968 return EnableMaskedInterleavedMemAccesses; 1969 1970 return TTI.enableMaskedInterleavedAccessVectorization(); 1971 } 1972 1973 // Try to vectorize the interleave group that \p Instr belongs to. 1974 // 1975 // E.g. Translate following interleaved load group (factor = 3): 1976 // for (i = 0; i < N; i+=3) { 1977 // R = Pic[i]; // Member of index 0 1978 // G = Pic[i+1]; // Member of index 1 1979 // B = Pic[i+2]; // Member of index 2 1980 // ... // do something to R, G, B 1981 // } 1982 // To: 1983 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 1984 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 1985 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 1986 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 1987 // 1988 // Or translate following interleaved store group (factor = 3): 1989 // for (i = 0; i < N; i+=3) { 1990 // ... do something to R, G, B 1991 // Pic[i] = R; // Member of index 0 1992 // Pic[i+1] = G; // Member of index 1 1993 // Pic[i+2] = B; // Member of index 2 1994 // } 1995 // To: 1996 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 1997 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 1998 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 1999 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2000 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2001 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr, 2002 VectorParts *BlockInMask) { 2003 const InterleaveGroup<Instruction> *Group = 2004 Cost->getInterleavedAccessGroup(Instr); 2005 assert(Group && "Fail to get an interleaved access group."); 2006 2007 // Skip if current instruction is not the insert position. 2008 if (Instr != Group->getInsertPos()) 2009 return; 2010 2011 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2012 Value *Ptr = getLoadStorePointerOperand(Instr); 2013 2014 // Prepare for the vector type of the interleaved load/store. 2015 Type *ScalarTy = getMemInstValueType(Instr); 2016 unsigned InterleaveFactor = Group->getFactor(); 2017 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2018 Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr)); 2019 2020 // Prepare for the new pointers. 2021 setDebugLocFromInst(Builder, Ptr); 2022 SmallVector<Value *, 2> NewPtrs; 2023 unsigned Index = Group->getIndex(Instr); 2024 2025 VectorParts Mask; 2026 bool IsMaskForCondRequired = BlockInMask; 2027 if (IsMaskForCondRequired) { 2028 Mask = *BlockInMask; 2029 // TODO: extend the masked interleaved-group support to reversed access. 2030 assert(!Group->isReverse() && "Reversed masked interleave-group " 2031 "not supported."); 2032 } 2033 2034 // If the group is reverse, adjust the index to refer to the last vector lane 2035 // instead of the first. We adjust the index from the first vector lane, 2036 // rather than directly getting the pointer for lane VF - 1, because the 2037 // pointer operand of the interleaved access is supposed to be uniform. For 2038 // uniform instructions, we're only required to generate a value for the 2039 // first vector lane in each unroll iteration. 
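// (Illustrative example.) With VF = 4 and an interleave factor of 3, a
// reversed group member at index 1 yields Index = 1 + 3 * 3 = 10, so the GEP
// by -Index below rewinds the lane-0 pointer to the lowest-addressed element
// covered by the wide access.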
2040 if (Group->isReverse()) 2041 Index += (VF - 1) * Group->getFactor(); 2042 2043 bool InBounds = false; 2044 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2045 InBounds = gep->isInBounds(); 2046 2047 for (unsigned Part = 0; Part < UF; Part++) { 2048 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0}); 2049 2050 // Notice current instruction could be any index. Need to adjust the address 2051 // to the member of index 0. 2052 // 2053 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2054 // b = A[i]; // Member of index 0 2055 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2056 // 2057 // E.g. A[i+1] = a; // Member of index 1 2058 // A[i] = b; // Member of index 0 2059 // A[i+2] = c; // Member of index 2 (Current instruction) 2060 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2061 NewPtr = Builder.CreateGEP(ScalarTy, NewPtr, Builder.getInt32(-Index)); 2062 if (InBounds) 2063 cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true); 2064 2065 // Cast to the vector pointer type. 2066 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2067 } 2068 2069 setDebugLocFromInst(Builder, Instr); 2070 Value *UndefVec = UndefValue::get(VecTy); 2071 2072 Value *MaskForGaps = nullptr; 2073 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2074 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2075 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2076 } 2077 2078 // Vectorize the interleaved load group. 2079 if (isa<LoadInst>(Instr)) { 2080 // For each unroll part, create a wide load for the group. 2081 SmallVector<Value *, 2> NewLoads; 2082 for (unsigned Part = 0; Part < UF; Part++) { 2083 Instruction *NewLoad; 2084 if (IsMaskForCondRequired || MaskForGaps) { 2085 assert(useMaskedInterleavedAccesses(*TTI) && 2086 "masked interleaved groups are not allowed."); 2087 Value *GroupMask = MaskForGaps; 2088 if (IsMaskForCondRequired) { 2089 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2090 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2091 Value *ShuffledMask = Builder.CreateShuffleVector( 2092 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2093 GroupMask = MaskForGaps 2094 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2095 MaskForGaps) 2096 : ShuffledMask; 2097 } 2098 NewLoad = 2099 Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(), 2100 GroupMask, UndefVec, "wide.masked.vec"); 2101 } 2102 else 2103 NewLoad = Builder.CreateAlignedLoad(VecTy, NewPtrs[Part], 2104 Group->getAlignment(), "wide.vec"); 2105 Group->addMetadata(NewLoad); 2106 NewLoads.push_back(NewLoad); 2107 } 2108 2109 // For each member in the group, shuffle out the appropriate data from the 2110 // wide loads. 2111 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2112 Instruction *Member = Group->getMember(I); 2113 2114 // Skip the gaps in the group. 2115 if (!Member) 2116 continue; 2117 2118 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF); 2119 for (unsigned Part = 0; Part < UF; Part++) { 2120 Value *StridedVec = Builder.CreateShuffleVector( 2121 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2122 2123 // If this member has different type, cast the result type. 
2124 if (Member->getType() != ScalarTy) { 2125 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2126 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2127 } 2128 2129 if (Group->isReverse()) 2130 StridedVec = reverseVector(StridedVec); 2131 2132 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2133 } 2134 } 2135 return; 2136 } 2137 2138 // The sub vector type for current instruction. 2139 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2140 2141 // Vectorize the interleaved store group. 2142 for (unsigned Part = 0; Part < UF; Part++) { 2143 // Collect the stored vector from each member. 2144 SmallVector<Value *, 4> StoredVecs; 2145 for (unsigned i = 0; i < InterleaveFactor; i++) { 2146 // Interleaved store group doesn't allow a gap, so each index has a member 2147 Instruction *Member = Group->getMember(i); 2148 assert(Member && "Fail to get a member from an interleaved store group"); 2149 2150 Value *StoredVec = getOrCreateVectorValue( 2151 cast<StoreInst>(Member)->getValueOperand(), Part); 2152 if (Group->isReverse()) 2153 StoredVec = reverseVector(StoredVec); 2154 2155 // If this member has different type, cast it to a unified type. 2156 2157 if (StoredVec->getType() != SubVT) 2158 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2159 2160 StoredVecs.push_back(StoredVec); 2161 } 2162 2163 // Concatenate all vectors into a wide vector. 2164 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2165 2166 // Interleave the elements in the wide vector. 2167 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2168 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2169 "interleaved.vec"); 2170 2171 Instruction *NewStoreInstr; 2172 if (IsMaskForCondRequired) { 2173 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2174 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2175 Value *ShuffledMask = Builder.CreateShuffleVector( 2176 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2177 NewStoreInstr = Builder.CreateMaskedStore( 2178 IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask); 2179 } 2180 else 2181 NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part], 2182 Group->getAlignment()); 2183 2184 Group->addMetadata(NewStoreInstr); 2185 } 2186 } 2187 2188 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2189 VectorParts *BlockInMask) { 2190 // Attempt to issue a wide load. 2191 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2192 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2193 2194 assert((LI || SI) && "Invalid Load/Store instruction"); 2195 2196 LoopVectorizationCostModel::InstWidening Decision = 2197 Cost->getWideningDecision(Instr, VF); 2198 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2199 "CM decision should be taken at this point"); 2200 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2201 return vectorizeInterleaveGroup(Instr); 2202 2203 Type *ScalarDataTy = getMemInstValueType(Instr); 2204 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2205 Value *Ptr = getLoadStorePointerOperand(Instr); 2206 unsigned Alignment = getLoadStoreAlignment(Instr); 2207 // An alignment of 0 means target abi alignment. We need to use the scalar's 2208 // target abi alignment in such a case. 
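// (Illustrative example.) A load of i32 with no alignment specified falls back
// to the ABI alignment of i32 (commonly 4 bytes), rather than to the alignment
// of the widened <VF x i32> vector type.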
2209 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2210 if (!Alignment) 2211 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2212 unsigned AddressSpace = getLoadStoreAddressSpace(Instr); 2213 2214 // Determine if the pointer operand of the access is either consecutive or 2215 // reverse consecutive. 2216 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2217 bool ConsecutiveStride = 2218 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2219 bool CreateGatherScatter = 2220 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2221 2222 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2223 // gather/scatter. Otherwise Decision should have been to Scalarize. 2224 assert((ConsecutiveStride || CreateGatherScatter) && 2225 "The instruction should be scalarized"); 2226 2227 // Handle consecutive loads/stores. 2228 if (ConsecutiveStride) 2229 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 2230 2231 VectorParts Mask; 2232 bool isMaskRequired = BlockInMask; 2233 if (isMaskRequired) 2234 Mask = *BlockInMask; 2235 2236 bool InBounds = false; 2237 if (auto *gep = dyn_cast<GetElementPtrInst>( 2238 getLoadStorePointerOperand(Instr)->stripPointerCasts())) 2239 InBounds = gep->isInBounds(); 2240 2241 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2242 // Calculate the pointer for the specific unroll-part. 2243 GetElementPtrInst *PartPtr = nullptr; 2244 2245 if (Reverse) { 2246 // If the address is consecutive but reversed, then the 2247 // wide store needs to start at the last vector element. 2248 PartPtr = cast<GetElementPtrInst>( 2249 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2250 PartPtr->setIsInBounds(InBounds); 2251 PartPtr = cast<GetElementPtrInst>( 2252 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2253 PartPtr->setIsInBounds(InBounds); 2254 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2255 Mask[Part] = reverseVector(Mask[Part]); 2256 } else { 2257 PartPtr = cast<GetElementPtrInst>( 2258 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2259 PartPtr->setIsInBounds(InBounds); 2260 } 2261 2262 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2263 }; 2264 2265 // Handle Stores: 2266 if (SI) { 2267 setDebugLocFromInst(Builder, SI); 2268 2269 for (unsigned Part = 0; Part < UF; ++Part) { 2270 Instruction *NewSI = nullptr; 2271 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 2272 if (CreateGatherScatter) { 2273 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2274 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2275 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2276 MaskPart); 2277 } else { 2278 if (Reverse) { 2279 // If we store to reverse consecutive memory locations, then we need 2280 // to reverse the order of elements in the stored value. 2281 StoredVal = reverseVector(StoredVal); 2282 // We don't want to update the value in the map as it might be used in 2283 // another expression. So don't call resetVectorValue(StoredVal). 2284 } 2285 auto *VecPtr = CreateVecPtr(Part, Ptr); 2286 if (isMaskRequired) 2287 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2288 Mask[Part]); 2289 else 2290 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2291 } 2292 addMetadata(NewSI, SI); 2293 } 2294 return; 2295 } 2296 2297 // Handle loads. 
2298 assert(LI && "Must have a load instruction"); 2299 setDebugLocFromInst(Builder, LI); 2300 for (unsigned Part = 0; Part < UF; ++Part) { 2301 Value *NewLI; 2302 if (CreateGatherScatter) { 2303 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2304 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2305 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2306 nullptr, "wide.masked.gather"); 2307 addMetadata(NewLI, LI); 2308 } else { 2309 auto *VecPtr = CreateVecPtr(Part, Ptr); 2310 if (isMaskRequired) 2311 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2312 UndefValue::get(DataTy), 2313 "wide.masked.load"); 2314 else 2315 NewLI = 2316 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2317 2318 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2319 addMetadata(NewLI, LI); 2320 if (Reverse) 2321 NewLI = reverseVector(NewLI); 2322 } 2323 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2324 } 2325 } 2326 2327 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2328 const VPIteration &Instance, 2329 bool IfPredicateInstr) { 2330 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2331 2332 setDebugLocFromInst(Builder, Instr); 2333 2334 // Does this instruction return a value ? 2335 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2336 2337 Instruction *Cloned = Instr->clone(); 2338 if (!IsVoidRetTy) 2339 Cloned->setName(Instr->getName() + ".cloned"); 2340 2341 // Replace the operands of the cloned instructions with their scalar 2342 // equivalents in the new loop. 2343 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2344 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2345 Cloned->setOperand(op, NewOp); 2346 } 2347 addNewMetadata(Cloned, Instr); 2348 2349 // Place the cloned scalar in the new loop. 2350 Builder.Insert(Cloned); 2351 2352 // Add the cloned scalar to the scalar map entry. 2353 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2354 2355 // If we just cloned a new assumption, add it the assumption cache. 2356 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2357 if (II->getIntrinsicID() == Intrinsic::assume) 2358 AC->registerAssumption(II); 2359 2360 // End if-block. 2361 if (IfPredicateInstr) 2362 PredicatedInstructions.push_back(Cloned); 2363 } 2364 2365 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2366 Value *End, Value *Step, 2367 Instruction *DL) { 2368 BasicBlock *Header = L->getHeader(); 2369 BasicBlock *Latch = L->getLoopLatch(); 2370 // As we're just creating this loop, it's possible no latch exists 2371 // yet. If so, use the header as this will be a single block loop. 2372 if (!Latch) 2373 Latch = Header; 2374 2375 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2376 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2377 setDebugLocFromInst(Builder, OldInst); 2378 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2379 2380 Builder.SetInsertPoint(Latch->getTerminator()); 2381 setDebugLocFromInst(Builder, OldInst); 2382 2383 // Create i+1 and fill the PHINode. 2384 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2385 Induction->addIncoming(Start, L->getLoopPreheader()); 2386 Induction->addIncoming(Next, Latch); 2387 // Create the compare. 2388 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2389 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2390 2391 // Now we have two terminators. 
Remove the old one from the block. 2392 Latch->getTerminator()->eraseFromParent(); 2393 2394 return Induction; 2395 } 2396 2397 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2398 if (TripCount) 2399 return TripCount; 2400 2401 assert(L && "Create Trip Count for null loop."); 2402 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2403 // Find the loop boundaries. 2404 ScalarEvolution *SE = PSE.getSE(); 2405 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2406 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2407 "Invalid loop count"); 2408 2409 Type *IdxTy = Legal->getWidestInductionType(); 2410 assert(IdxTy && "No type for induction"); 2411 2412 // The exit count might have the type of i64 while the phi is i32. This can 2413 // happen if we have an induction variable that is sign extended before the 2414 // compare. The only way that we get a backedge taken count is that the 2415 // induction variable was signed and as such will not overflow. In such a case 2416 // truncation is legal. 2417 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2418 IdxTy->getPrimitiveSizeInBits()) 2419 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2420 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2421 2422 // Get the total trip count from the count by adding 1. 2423 const SCEV *ExitCount = SE->getAddExpr( 2424 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2425 2426 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2427 2428 // Expand the trip count and place the new instructions in the preheader. 2429 // Notice that the pre-header does not change, only the loop body. 2430 SCEVExpander Exp(*SE, DL, "induction"); 2431 2432 // Count holds the overall loop count (N). 2433 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2434 L->getLoopPreheader()->getTerminator()); 2435 2436 if (TripCount->getType()->isPointerTy()) 2437 TripCount = 2438 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2439 L->getLoopPreheader()->getTerminator()); 2440 2441 return TripCount; 2442 } 2443 2444 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2445 if (VectorTripCount) 2446 return VectorTripCount; 2447 2448 Value *TC = getOrCreateTripCount(L); 2449 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2450 2451 Type *Ty = TC->getType(); 2452 Constant *Step = ConstantInt::get(Ty, VF * UF); 2453 2454 // If the tail is to be folded by masking, round the number of iterations N 2455 // up to a multiple of Step instead of rounding down. This is done by first 2456 // adding Step-1 and then rounding down. Note that it's ok if this addition 2457 // overflows: the vector induction variable will eventually wrap to zero given 2458 // that it starts at zero and its Step is a power of two; the loop will then 2459 // exit, with the last early-exit vector comparison also producing all-true. 2460 if (Cost->foldTailByMasking()) { 2461 assert(isPowerOf2_32(VF * UF) && 2462 "VF*UF must be a power of 2 when folding tail by masking"); 2463 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2464 } 2465 2466 // Now we need to generate the expression for the part of the loop that the 2467 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2468 // iterations are not required for correctness, or N - Step, otherwise. 
Step 2469 // is equal to the vectorization factor (number of SIMD elements) times the 2470 // unroll factor (number of SIMD instructions). 2471 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2472 2473 // If there is a non-reversed interleaved group that may speculatively access 2474 // memory out-of-bounds, we need to ensure that there will be at least one 2475 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2476 // the trip count, we set the remainder to be equal to the step. If the step 2477 // does not evenly divide the trip count, no adjustment is necessary since 2478 // there will already be scalar iterations. Note that the minimum iterations 2479 // check ensures that N >= Step. 2480 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2481 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2482 R = Builder.CreateSelect(IsZero, Step, R); 2483 } 2484 2485 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2486 2487 return VectorTripCount; 2488 } 2489 2490 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2491 const DataLayout &DL) { 2492 // Verify that V is a vector type with same number of elements as DstVTy. 2493 unsigned VF = DstVTy->getNumElements(); 2494 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2495 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2496 Type *SrcElemTy = SrcVecTy->getElementType(); 2497 Type *DstElemTy = DstVTy->getElementType(); 2498 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2499 "Vector elements must have same size"); 2500 2501 // Do a direct cast if element types are castable. 2502 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2503 return Builder.CreateBitOrPointerCast(V, DstVTy); 2504 } 2505 // V cannot be directly casted to desired vector type. 2506 // May happen when V is a floating point vector but DstVTy is a vector of 2507 // pointers or vice-versa. Handle this using a two-step bitcast using an 2508 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2509 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2510 "Only one type should be a pointer type"); 2511 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2512 "Only one type should be a floating point type"); 2513 Type *IntTy = 2514 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2515 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2516 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2517 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2518 } 2519 2520 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2521 BasicBlock *Bypass) { 2522 Value *Count = getOrCreateTripCount(L); 2523 BasicBlock *BB = L->getLoopPreheader(); 2524 IRBuilder<> Builder(BB->getTerminator()); 2525 2526 // Generate code to check if the loop's trip count is less than VF * UF, or 2527 // equal to it in case a scalar epilogue is required; this implies that the 2528 // vector trip count is zero. This check also covers the case where adding one 2529 // to the backedge-taken count overflowed leading to an incorrect trip count 2530 // of zero. In this case we will also jump to the scalar loop. 2531 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2532 : ICmpInst::ICMP_ULT; 2533 2534 // If tail is to be folded, vector loop takes care of all iterations. 
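// Otherwise (illustrative example): with VF * UF = 8, the check below branches
// to the scalar loop when Count < 8, or when Count <= 8 if a scalar epilogue
// is required, since in both cases the vector trip count would be zero.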
2535 Value *CheckMinIters = Builder.getFalse(); 2536 if (!Cost->foldTailByMasking()) 2537 CheckMinIters = Builder.CreateICmp( 2538 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2539 "min.iters.check"); 2540 2541 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2542 // Update dominator tree immediately if the generated block is a 2543 // LoopBypassBlock because SCEV expansions to generate loop bypass 2544 // checks may query it before the current function is finished. 2545 DT->addNewBlock(NewBB, BB); 2546 if (L->getParentLoop()) 2547 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2548 ReplaceInstWithInst(BB->getTerminator(), 2549 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2550 LoopBypassBlocks.push_back(BB); 2551 } 2552 2553 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2554 BasicBlock *BB = L->getLoopPreheader(); 2555 2556 // Generate the code to check that the SCEV assumptions that we made. 2557 // We want the new basic block to start at the first instruction in a 2558 // sequence of instructions that form a check. 2559 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2560 "scev.check"); 2561 Value *SCEVCheck = 2562 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 2563 2564 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2565 if (C->isZero()) 2566 return; 2567 2568 assert(!Cost->foldTailByMasking() && 2569 "Cannot SCEV check stride or overflow when folding tail"); 2570 // Create a new block containing the stride check. 2571 BB->setName("vector.scevcheck"); 2572 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2573 // Update dominator tree immediately if the generated block is a 2574 // LoopBypassBlock because SCEV expansions to generate loop bypass 2575 // checks may query it before the current function is finished. 2576 DT->addNewBlock(NewBB, BB); 2577 if (L->getParentLoop()) 2578 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2579 ReplaceInstWithInst(BB->getTerminator(), 2580 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 2581 LoopBypassBlocks.push_back(BB); 2582 AddedSafetyChecks = true; 2583 } 2584 2585 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2586 // VPlan-native path does not do any analysis for runtime checks currently. 2587 if (EnableVPlanNativePath) 2588 return; 2589 2590 BasicBlock *BB = L->getLoopPreheader(); 2591 2592 // Generate the code that checks in runtime if arrays overlap. We put the 2593 // checks into a separate block to make the more common case of few elements 2594 // faster. 2595 Instruction *FirstCheckInst; 2596 Instruction *MemRuntimeCheck; 2597 std::tie(FirstCheckInst, MemRuntimeCheck) = 2598 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2599 if (!MemRuntimeCheck) 2600 return; 2601 2602 assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail"); 2603 // Create a new block containing the memory check. 2604 BB->setName("vector.memcheck"); 2605 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2606 // Update dominator tree immediately if the generated block is a 2607 // LoopBypassBlock because SCEV expansions to generate loop bypass 2608 // checks may query it before the current function is finished. 
2609 DT->addNewBlock(NewBB, BB); 2610 if (L->getParentLoop()) 2611 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2612 ReplaceInstWithInst(BB->getTerminator(), 2613 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2614 LoopBypassBlocks.push_back(BB); 2615 AddedSafetyChecks = true; 2616 2617 // We currently don't use LoopVersioning for the actual loop cloning but we 2618 // still use it to add the noalias metadata. 2619 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2620 PSE.getSE()); 2621 LVer->prepareNoAliasMetadata(); 2622 } 2623 2624 Value *InnerLoopVectorizer::emitTransformedIndex( 2625 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2626 const InductionDescriptor &ID) const { 2627 2628 SCEVExpander Exp(*SE, DL, "induction"); 2629 auto Step = ID.getStep(); 2630 auto StartValue = ID.getStartValue(); 2631 assert(Index->getType() == Step->getType() && 2632 "Index type does not match StepValue type"); 2633 2634 // Note: the IR at this point is broken. We cannot use SE to create any new 2635 // SCEV and then expand it, hoping that SCEV's simplification will give us 2636 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2637 // lead to various SCEV crashes. So all we can do is to use builder and rely 2638 // on InstCombine for future simplifications. Here we handle some trivial 2639 // cases only. 2640 auto CreateAdd = [&B](Value *X, Value *Y) { 2641 assert(X->getType() == Y->getType() && "Types don't match!"); 2642 if (auto *CX = dyn_cast<ConstantInt>(X)) 2643 if (CX->isZero()) 2644 return Y; 2645 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2646 if (CY->isZero()) 2647 return X; 2648 return B.CreateAdd(X, Y); 2649 }; 2650 2651 auto CreateMul = [&B](Value *X, Value *Y) { 2652 assert(X->getType() == Y->getType() && "Types don't match!"); 2653 if (auto *CX = dyn_cast<ConstantInt>(X)) 2654 if (CX->isOne()) 2655 return Y; 2656 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2657 if (CY->isOne()) 2658 return X; 2659 return B.CreateMul(X, Y); 2660 }; 2661 2662 switch (ID.getKind()) { 2663 case InductionDescriptor::IK_IntInduction: { 2664 assert(Index->getType() == StartValue->getType() && 2665 "Index type does not match StartValue type"); 2666 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2667 return B.CreateSub(StartValue, Index); 2668 auto *Offset = CreateMul( 2669 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2670 return CreateAdd(StartValue, Offset); 2671 } 2672 case InductionDescriptor::IK_PtrInduction: { 2673 assert(isa<SCEVConstant>(Step) && 2674 "Expected constant step for pointer induction"); 2675 return B.CreateGEP( 2676 StartValue->getType()->getPointerElementType(), StartValue, 2677 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2678 &*B.GetInsertPoint()))); 2679 } 2680 case InductionDescriptor::IK_FpInduction: { 2681 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2682 auto InductionBinOp = ID.getInductionBinOp(); 2683 assert(InductionBinOp && 2684 (InductionBinOp->getOpcode() == Instruction::FAdd || 2685 InductionBinOp->getOpcode() == Instruction::FSub) && 2686 "Original bin op should be defined for FP induction"); 2687 2688 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2689 2690 // Floating point operations had to be 'fast' to enable the induction. 
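// (Illustrative note.) The fast-math flags are re-applied to the fmul and the
// fadd/fsub created below, mirroring the flags that were required on the
// original scalar FP induction.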
2691 FastMathFlags Flags; 2692 Flags.setFast(); 2693 2694 Value *MulExp = B.CreateFMul(StepValue, Index); 2695 if (isa<Instruction>(MulExp)) 2696 // We have to check, the MulExp may be a constant. 2697 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2698 2699 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2700 "induction"); 2701 if (isa<Instruction>(BOp)) 2702 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2703 2704 return BOp; 2705 } 2706 case InductionDescriptor::IK_NoInduction: 2707 return nullptr; 2708 } 2709 llvm_unreachable("invalid enum"); 2710 } 2711 2712 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2713 /* 2714 In this function we generate a new loop. The new loop will contain 2715 the vectorized instructions while the old loop will continue to run the 2716 scalar remainder. 2717 2718 [ ] <-- loop iteration number check. 2719 / | 2720 / v 2721 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2722 | / | 2723 | / v 2724 || [ ] <-- vector pre header. 2725 |/ | 2726 | v 2727 | [ ] \ 2728 | [ ]_| <-- vector loop. 2729 | | 2730 | v 2731 | -[ ] <--- middle-block. 2732 | / | 2733 | / v 2734 -|- >[ ] <--- new preheader. 2735 | | 2736 | v 2737 | [ ] \ 2738 | [ ]_| <-- old scalar loop to handle remainder. 2739 \ | 2740 \ v 2741 >[ ] <-- exit block. 2742 ... 2743 */ 2744 2745 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2746 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2747 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2748 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2749 assert(VectorPH && "Invalid loop structure"); 2750 assert(ExitBlock && "Must have an exit block"); 2751 2752 // Some loops have a single integer induction variable, while other loops 2753 // don't. One example is c++ iterators that often have multiple pointer 2754 // induction variables. In the code below we also support a case where we 2755 // don't have a single induction variable. 2756 // 2757 // We try to obtain an induction variable from the original loop as hard 2758 // as possible. However if we don't find one that: 2759 // - is an integer 2760 // - counts from zero, stepping by one 2761 // - is the size of the widest induction variable type 2762 // then we create a new one. 2763 OldInduction = Legal->getPrimaryInduction(); 2764 Type *IdxTy = Legal->getWidestInductionType(); 2765 2766 // Split the single block loop into the two loop structure described above. 2767 BasicBlock *VecBody = 2768 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2769 BasicBlock *MiddleBlock = 2770 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2771 BasicBlock *ScalarPH = 2772 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2773 2774 // Create and register the new vector loop. 2775 Loop *Lp = LI->AllocateLoop(); 2776 Loop *ParentLoop = OrigLoop->getParentLoop(); 2777 2778 // Insert the new loop into the loop nest and register the new basic blocks 2779 // before calling any utilities such as SCEV that require valid LoopInfo. 2780 if (ParentLoop) { 2781 ParentLoop->addChildLoop(Lp); 2782 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2783 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2784 } else { 2785 LI->addTopLevelLoop(Lp); 2786 } 2787 Lp->addBasicBlockToLoop(VecBody, *LI); 2788 2789 // Find the loop boundaries. 2790 Value *Count = getOrCreateTripCount(Lp); 2791 2792 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2793 2794 // Now, compare the new count to zero. 
If it is zero skip the vector loop and 2795 // jump to the scalar loop. This check also covers the case where the 2796 // backedge-taken count is uint##_max: adding one to it will overflow leading 2797 // to an incorrect trip count of zero. In this (rare) case we will also jump 2798 // to the scalar loop. 2799 emitMinimumIterationCountCheck(Lp, ScalarPH); 2800 2801 // Generate the code to check any assumptions that we've made for SCEV 2802 // expressions. 2803 emitSCEVChecks(Lp, ScalarPH); 2804 2805 // Generate the code that checks in runtime if arrays overlap. We put the 2806 // checks into a separate block to make the more common case of few elements 2807 // faster. 2808 emitMemRuntimeChecks(Lp, ScalarPH); 2809 2810 // Generate the induction variable. 2811 // The loop step is equal to the vectorization factor (num of SIMD elements) 2812 // times the unroll factor (num of SIMD instructions). 2813 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 2814 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 2815 Induction = 2816 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 2817 getDebugLocFromInstOrOperands(OldInduction)); 2818 2819 // We are going to resume the execution of the scalar loop. 2820 // Go over all of the induction variables that we found and fix the 2821 // PHIs that are left in the scalar version of the loop. 2822 // The starting values of PHI nodes depend on the counter of the last 2823 // iteration in the vectorized loop. 2824 // If we come from a bypass edge then we need to start from the original 2825 // start value. 2826 2827 // This variable saves the new starting index for the scalar loop. It is used 2828 // to test if there are any tail iterations left once the vector loop has 2829 // completed. 2830 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2831 for (auto &InductionEntry : *List) { 2832 PHINode *OrigPhi = InductionEntry.first; 2833 InductionDescriptor II = InductionEntry.second; 2834 2835 // Create phi nodes to merge from the backedge-taken check block. 2836 PHINode *BCResumeVal = PHINode::Create( 2837 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 2838 // Copy original phi DL over to the new one. 2839 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 2840 Value *&EndValue = IVEndValues[OrigPhi]; 2841 if (OrigPhi == OldInduction) { 2842 // We know what the end value is. 2843 EndValue = CountRoundDown; 2844 } else { 2845 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 2846 Type *StepType = II.getStep()->getType(); 2847 Instruction::CastOps CastOp = 2848 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 2849 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 2850 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2851 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 2852 EndValue->setName("ind.end"); 2853 } 2854 2855 // The new PHI merges the original incoming value, in case of a bypass, 2856 // or the value at the end of the vectorized loop. 2857 BCResumeVal->addIncoming(EndValue, MiddleBlock); 2858 2859 // Fix the scalar body counter (PHI node). 2860 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 2861 2862 // The old induction's phi node in the scalar body needs the truncated 2863 // value. 
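  // As an illustrative sketch (the type and the set of incoming blocks depend
  // on the induction and on which bypass checks were emitted), the resume phi
  // built here ends up as:
  //   scalar.ph:
  //     %bc.resume.val = phi i64 [ %ind.end, %middle.block ],
  //                              [ %start, %vector.memcheck ], ...
  // i.e. the vector-loop end value when arriving from the middle block and
  // the original start value when arriving from any bypass block.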
2864 for (BasicBlock *BB : LoopBypassBlocks) 2865 BCResumeVal->addIncoming(II.getStartValue(), BB); 2866 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 2867 } 2868 2869 // Add a check in the middle block to see if we have completed 2870 // all of the iterations in the first vector loop. 2871 // If (N - N%VF) == N, then we *don't* need to run the remainder. 2872 // If tail is to be folded, we know we don't need to run the remainder. 2873 Value *CmpN = Builder.getTrue(); 2874 if (!Cost->foldTailByMasking()) 2875 CmpN = 2876 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 2877 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 2878 ReplaceInstWithInst(MiddleBlock->getTerminator(), 2879 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 2880 2881 // Get ready to start creating new instructions into the vectorized body. 2882 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 2883 2884 // Save the state. 2885 LoopVectorPreHeader = Lp->getLoopPreheader(); 2886 LoopScalarPreHeader = ScalarPH; 2887 LoopMiddleBlock = MiddleBlock; 2888 LoopExitBlock = ExitBlock; 2889 LoopVectorBody = VecBody; 2890 LoopScalarBody = OldBasicBlock; 2891 2892 Optional<MDNode *> VectorizedLoopID = 2893 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 2894 LLVMLoopVectorizeFollowupVectorized}); 2895 if (VectorizedLoopID.hasValue()) { 2896 Lp->setLoopID(VectorizedLoopID.getValue()); 2897 2898 // Do not setAlreadyVectorized if loop attributes have been defined 2899 // explicitly. 2900 return LoopVectorPreHeader; 2901 } 2902 2903 // Keep all loop hints from the original loop on the vector loop (we'll 2904 // replace the vectorizer-specific hints below). 2905 if (MDNode *LID = OrigLoop->getLoopID()) 2906 Lp->setLoopID(LID); 2907 2908 LoopVectorizeHints Hints(Lp, true, *ORE); 2909 Hints.setAlreadyVectorized(); 2910 2911 return LoopVectorPreHeader; 2912 } 2913 2914 // Fix up external users of the induction variable. At this point, we are 2915 // in LCSSA form, with all external PHIs that use the IV having one input value, 2916 // coming from the remainder loop. We need those PHIs to also have a correct 2917 // value for the IV when arriving directly from the middle block. 2918 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 2919 const InductionDescriptor &II, 2920 Value *CountRoundDown, Value *EndValue, 2921 BasicBlock *MiddleBlock) { 2922 // There are two kinds of external IV usages - those that use the value 2923 // computed in the last iteration (the PHI) and those that use the penultimate 2924 // value (the value that feeds into the phi from the loop latch). 2925 // We allow both, but they, obviously, have different values. 2926 2927 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 2928 2929 DenseMap<Value *, Value *> MissingVals; 2930 2931 // An external user of the last iteration's value should see the value that 2932 // the remainder loop uses to initialize its own IV. 2933 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 2934 for (User *U : PostInc->users()) { 2935 Instruction *UI = cast<Instruction>(U); 2936 if (!OrigLoop->contains(UI)) { 2937 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 2938 MissingVals[UI] = EndValue; 2939 } 2940 } 2941 2942 // An external user of the penultimate value need to see EndValue - Step. 2943 // The simplest way to get this is to recompute it from the constituent SCEVs, 2944 // that is Start + (Step * (CRD - 1)). 
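  // For example (illustrative values): with Start = 10, Step = 2 and a vector
  // trip count CRD = 8, the last value observed by the phi inside the loop is
  // 10 + 2 * (8 - 1) = 24, i.e. EndValue - Step (26 - 2).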
2945 for (User *U : OrigPhi->users()) { 2946 auto *UI = cast<Instruction>(U); 2947 if (!OrigLoop->contains(UI)) { 2948 const DataLayout &DL = 2949 OrigLoop->getHeader()->getModule()->getDataLayout(); 2950 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 2951 2952 IRBuilder<> B(MiddleBlock->getTerminator()); 2953 Value *CountMinusOne = B.CreateSub( 2954 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 2955 Value *CMO = 2956 !II.getStep()->getType()->isIntegerTy() 2957 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 2958 II.getStep()->getType()) 2959 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 2960 CMO->setName("cast.cmo"); 2961 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 2962 Escape->setName("ind.escape"); 2963 MissingVals[UI] = Escape; 2964 } 2965 } 2966 2967 for (auto &I : MissingVals) { 2968 PHINode *PHI = cast<PHINode>(I.first); 2969 // One corner case we have to handle is two IVs "chasing" each-other, 2970 // that is %IV2 = phi [...], [ %IV1, %latch ] 2971 // In this case, if IV1 has an external use, we need to avoid adding both 2972 // "last value of IV1" and "penultimate value of IV2". So, verify that we 2973 // don't already have an incoming value for the middle block. 2974 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 2975 PHI->addIncoming(I.second, MiddleBlock); 2976 } 2977 } 2978 2979 namespace { 2980 2981 struct CSEDenseMapInfo { 2982 static bool canHandle(const Instruction *I) { 2983 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 2984 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 2985 } 2986 2987 static inline Instruction *getEmptyKey() { 2988 return DenseMapInfo<Instruction *>::getEmptyKey(); 2989 } 2990 2991 static inline Instruction *getTombstoneKey() { 2992 return DenseMapInfo<Instruction *>::getTombstoneKey(); 2993 } 2994 2995 static unsigned getHashValue(const Instruction *I) { 2996 assert(canHandle(I) && "Unknown instruction!"); 2997 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 2998 I->value_op_end())); 2999 } 3000 3001 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3002 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3003 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3004 return LHS == RHS; 3005 return LHS->isIdenticalTo(RHS); 3006 } 3007 }; 3008 3009 } // end anonymous namespace 3010 3011 ///Perform cse of induction variable instructions. 3012 static void cse(BasicBlock *BB) { 3013 // Perform simple cse. 3014 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3015 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3016 Instruction *In = &*I++; 3017 3018 if (!CSEDenseMapInfo::canHandle(In)) 3019 continue; 3020 3021 // Check if we can replace this instruction with any of the 3022 // visited instructions. 3023 if (Instruction *V = CSEMap.lookup(In)) { 3024 In->replaceAllUsesWith(V); 3025 In->eraseFromParent(); 3026 continue; 3027 } 3028 3029 CSEMap[In] = In; 3030 } 3031 } 3032 3033 /// Estimate the overhead of scalarizing an instruction. This is a 3034 /// convenience wrapper for the type-based getScalarizationOverhead API. 
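/// For example (actual figures are target-dependent, so this is only a
/// sketch): scalarizing `%r = add <4 x i32> %a, %b` requires extracting the
/// four lanes of each operand and inserting the four scalar results back into
/// a vector; the value returned here accounts for those extracts and inserts.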
3035 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3036 const TargetTransformInfo &TTI) { 3037 if (VF == 1) 3038 return 0; 3039 3040 unsigned Cost = 0; 3041 Type *RetTy = ToVectorTy(I->getType(), VF); 3042 if (!RetTy->isVoidTy() && 3043 (!isa<LoadInst>(I) || 3044 !TTI.supportsEfficientVectorElementLoadStore())) 3045 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3046 3047 // Some targets keep addresses scalar. 3048 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 3049 return Cost; 3050 3051 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3052 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3053 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3054 } 3055 else if (!isa<StoreInst>(I) || 3056 !TTI.supportsEfficientVectorElementLoadStore()) { 3057 SmallVector<const Value *, 4> Operands(I->operand_values()); 3058 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3059 } 3060 3061 return Cost; 3062 } 3063 3064 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3065 // Return the cost of the instruction, including scalarization overhead if it's 3066 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3067 // i.e. either vector version isn't available, or is too expensive. 3068 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3069 const TargetTransformInfo &TTI, 3070 const TargetLibraryInfo *TLI, 3071 bool &NeedToScalarize) { 3072 Function *F = CI->getCalledFunction(); 3073 StringRef FnName = CI->getCalledFunction()->getName(); 3074 Type *ScalarRetTy = CI->getType(); 3075 SmallVector<Type *, 4> Tys, ScalarTys; 3076 for (auto &ArgOp : CI->arg_operands()) 3077 ScalarTys.push_back(ArgOp->getType()); 3078 3079 // Estimate cost of scalarized vector call. The source operands are assumed 3080 // to be vectors, so we need to extract individual elements from there, 3081 // execute VF scalar calls, and then gather the result into the vector return 3082 // value. 3083 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3084 if (VF == 1) 3085 return ScalarCallCost; 3086 3087 // Compute corresponding vector type for return value and arguments. 3088 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3089 for (Type *ScalarTy : ScalarTys) 3090 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3091 3092 // Compute costs of unpacking argument values for the scalar calls and 3093 // packing the return values to a vector. 3094 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3095 3096 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3097 3098 // If we can't emit a vector call for this function, then the currently found 3099 // cost is the cost we need to return. 3100 NeedToScalarize = true; 3101 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3102 return Cost; 3103 3104 // If the corresponding vector cost is cheaper, return its cost. 3105 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3106 if (VectorCallCost < Cost) { 3107 NeedToScalarize = false; 3108 return VectorCallCost; 3109 } 3110 return Cost; 3111 } 3112 3113 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3114 // factor VF. Return the cost of the instruction, including scalarization 3115 // overhead if it's needed. 
3116 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3117 const TargetTransformInfo &TTI, 3118 const TargetLibraryInfo *TLI) { 3119 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3120 assert(ID && "Expected intrinsic call!"); 3121 3122 FastMathFlags FMF; 3123 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3124 FMF = FPMO->getFastMathFlags(); 3125 3126 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3127 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3128 } 3129 3130 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3131 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3132 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3133 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3134 } 3135 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3136 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3137 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3138 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3139 } 3140 3141 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3142 // For every instruction `I` in MinBWs, truncate the operands, create a 3143 // truncated version of `I` and reextend its result. InstCombine runs 3144 // later and will remove any ext/trunc pairs. 3145 SmallPtrSet<Value *, 4> Erased; 3146 for (const auto &KV : Cost->getMinimalBitwidths()) { 3147 // If the value wasn't vectorized, we must maintain the original scalar 3148 // type. The absence of the value from VectorLoopValueMap indicates that it 3149 // wasn't vectorized. 3150 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3151 continue; 3152 for (unsigned Part = 0; Part < UF; ++Part) { 3153 Value *I = getOrCreateVectorValue(KV.first, Part); 3154 if (Erased.find(I) != Erased.end() || I->use_empty() || 3155 !isa<Instruction>(I)) 3156 continue; 3157 Type *OriginalTy = I->getType(); 3158 Type *ScalarTruncatedTy = 3159 IntegerType::get(OriginalTy->getContext(), KV.second); 3160 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3161 OriginalTy->getVectorNumElements()); 3162 if (TruncatedTy == OriginalTy) 3163 continue; 3164 3165 IRBuilder<> B(cast<Instruction>(I)); 3166 auto ShrinkOperand = [&](Value *V) -> Value * { 3167 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3168 if (ZI->getSrcTy() == TruncatedTy) 3169 return ZI->getOperand(0); 3170 return B.CreateZExtOrTrunc(V, TruncatedTy); 3171 }; 3172 3173 // The actual instruction modification depends on the instruction type, 3174 // unfortunately. 3175 Value *NewI = nullptr; 3176 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3177 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3178 ShrinkOperand(BO->getOperand(1))); 3179 3180 // Any wrapping introduced by shrinking this operation shouldn't be 3181 // considered undefined behavior. So, we can't unconditionally copy 3182 // arithmetic wrapping flags to NewI. 
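        // For example (illustrative): `add nuw i32 255, 1` cannot wrap, but
        // the shrunk form `add nuw i8 255, 1` does wrap and would therefore
        // be poison if the nuw flag were carried over; dropping the wrap
        // flags keeps the narrowed operation well defined.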
3183 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3184 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3185 NewI = 3186 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3187 ShrinkOperand(CI->getOperand(1))); 3188 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3189 NewI = B.CreateSelect(SI->getCondition(), 3190 ShrinkOperand(SI->getTrueValue()), 3191 ShrinkOperand(SI->getFalseValue())); 3192 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3193 switch (CI->getOpcode()) { 3194 default: 3195 llvm_unreachable("Unhandled cast!"); 3196 case Instruction::Trunc: 3197 NewI = ShrinkOperand(CI->getOperand(0)); 3198 break; 3199 case Instruction::SExt: 3200 NewI = B.CreateSExtOrTrunc( 3201 CI->getOperand(0), 3202 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3203 break; 3204 case Instruction::ZExt: 3205 NewI = B.CreateZExtOrTrunc( 3206 CI->getOperand(0), 3207 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3208 break; 3209 } 3210 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3211 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3212 auto *O0 = B.CreateZExtOrTrunc( 3213 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3214 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3215 auto *O1 = B.CreateZExtOrTrunc( 3216 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3217 3218 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3219 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3220 // Don't do anything with the operands, just extend the result. 3221 continue; 3222 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3223 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3224 auto *O0 = B.CreateZExtOrTrunc( 3225 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3226 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3227 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3228 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3229 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3230 auto *O0 = B.CreateZExtOrTrunc( 3231 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3232 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3233 } else { 3234 // If we don't know what to do, be conservative and don't do anything. 3235 continue; 3236 } 3237 3238 // Lastly, extend the result. 3239 NewI->takeName(cast<Instruction>(I)); 3240 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3241 I->replaceAllUsesWith(Res); 3242 cast<Instruction>(I)->eraseFromParent(); 3243 Erased.insert(I); 3244 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3245 } 3246 } 3247 3248 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3249 for (const auto &KV : Cost->getMinimalBitwidths()) { 3250 // If the value wasn't vectorized, we must maintain the original scalar 3251 // type. The absence of the value from VectorLoopValueMap indicates that it 3252 // wasn't vectorized. 
3253 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3254 continue; 3255 for (unsigned Part = 0; Part < UF; ++Part) { 3256 Value *I = getOrCreateVectorValue(KV.first, Part); 3257 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3258 if (Inst && Inst->use_empty()) { 3259 Value *NewI = Inst->getOperand(0); 3260 Inst->eraseFromParent(); 3261 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3262 } 3263 } 3264 } 3265 } 3266 3267 void InnerLoopVectorizer::fixVectorizedLoop() { 3268 // Insert truncates and extends for any truncated instructions as hints to 3269 // InstCombine. 3270 if (VF > 1) 3271 truncateToMinimalBitwidths(); 3272 3273 // Fix widened non-induction PHIs by setting up the PHI operands. 3274 if (OrigPHIsToFix.size()) { 3275 assert(EnableVPlanNativePath && 3276 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3277 fixNonInductionPHIs(); 3278 } 3279 3280 // At this point every instruction in the original loop is widened to a 3281 // vector form. Now we need to fix the recurrences in the loop. These PHI 3282 // nodes are currently empty because we did not want to introduce cycles. 3283 // This is the second stage of vectorizing recurrences. 3284 fixCrossIterationPHIs(); 3285 3286 // Update the dominator tree. 3287 // 3288 // FIXME: After creating the structure of the new loop, the dominator tree is 3289 // no longer up-to-date, and it remains that way until we update it 3290 // here. An out-of-date dominator tree is problematic for SCEV, 3291 // because SCEVExpander uses it to guide code generation. The 3292 // vectorizer use SCEVExpanders in several places. Instead, we should 3293 // keep the dominator tree up-to-date as we go. 3294 updateAnalysis(); 3295 3296 // Fix-up external users of the induction variables. 3297 for (auto &Entry : *Legal->getInductionVars()) 3298 fixupIVUsers(Entry.first, Entry.second, 3299 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3300 IVEndValues[Entry.first], LoopMiddleBlock); 3301 3302 fixLCSSAPHIs(); 3303 for (Instruction *PI : PredicatedInstructions) 3304 sinkScalarOperands(&*PI); 3305 3306 // Remove redundant induction instructions. 3307 cse(LoopVectorBody); 3308 } 3309 3310 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3311 // In order to support recurrences we need to be able to vectorize Phi nodes. 3312 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3313 // stage #2: We now need to fix the recurrences by adding incoming edges to 3314 // the currently empty PHI nodes. At this point every instruction in the 3315 // original loop is widened to a vector form so we can use them to construct 3316 // the incoming edges. 3317 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3318 // Handle first-order recurrences and reductions that need to be fixed. 3319 if (Legal->isFirstOrderRecurrence(&Phi)) 3320 fixFirstOrderRecurrence(&Phi); 3321 else if (Legal->isReductionVariable(&Phi)) 3322 fixReduction(&Phi); 3323 } 3324 } 3325 3326 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3327 // This is the second phase of vectorizing first-order recurrences. An 3328 // overview of the transformation is described below. Suppose we have the 3329 // following loop. 3330 // 3331 // for (int i = 0; i < n; ++i) 3332 // b[i] = a[i] - a[i - 1]; 3333 // 3334 // There is a first-order recurrence on "a". 
For this loop, the shorthand
3335 // scalar IR looks like:
3336 //
3337 // scalar.ph:
3338 // s_init = a[-1]
3339 // br scalar.body
3340 //
3341 // scalar.body:
3342 // i = phi [0, scalar.ph], [i+1, scalar.body]
3343 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3344 // s2 = a[i]
3345 // b[i] = s2 - s1
3346 // br cond, scalar.body, ...
3347 //
3348 // In this example, s1 is a recurrence because its value depends on the
3349 // previous iteration. In the first phase of vectorization, we created a
3350 // temporary value for s1. We now complete the vectorization and produce the
3351 // shorthand vector IR shown below (for VF = 4, UF = 1).
3352 //
3353 // vector.ph:
3354 // v_init = vector(..., ..., ..., a[-1])
3355 // br vector.body
3356 //
3357 // vector.body
3358 // i = phi [0, vector.ph], [i+4, vector.body]
3359 // v1 = phi [v_init, vector.ph], [v2, vector.body]
3360 // v2 = a[i, i+1, i+2, i+3];
3361 // v3 = vector(v1(3), v2(0, 1, 2))
3362 // b[i, i+1, i+2, i+3] = v2 - v3
3363 // br cond, vector.body, middle.block
3364 //
3365 // middle.block:
3366 // x = v2(3)
3367 // br scalar.ph
3368 //
3369 // scalar.ph:
3370 // s_init = phi [x, middle.block], [a[-1], otherwise]
3371 // br scalar.body
3372 //
3373 // After the vector loop finishes executing, we extract the next value of
3374 // the recurrence (x) to use as the initial value in the scalar loop.
3375
3376 // Get the original loop preheader and single loop latch.
3377 auto *Preheader = OrigLoop->getLoopPreheader();
3378 auto *Latch = OrigLoop->getLoopLatch();
3379
3380 // Get the initial and previous values of the scalar recurrence.
3381 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3382 auto *Previous = Phi->getIncomingValueForBlock(Latch);
3383
3384 // Create a vector from the initial value.
3385 auto *VectorInit = ScalarInit;
3386 if (VF > 1) {
3387 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3388 VectorInit = Builder.CreateInsertElement(
3389 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3390 Builder.getInt32(VF - 1), "vector.recur.init");
3391 }
3392
3393 // We constructed a temporary phi node in the first phase of vectorization.
3394 // This phi node will eventually be deleted.
3395 Builder.SetInsertPoint(
3396 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3397
3398 // Create a phi node for the new recurrence. The current value will either be
3399 // the initial value inserted into a vector or a loop-varying vector value.
3400 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3401 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3402
3403 // Get the vectorized previous value of the last part UF - 1. It appears last
3404 // among all unrolled iterations, due to the order of their construction.
3405 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3406
3407 // Set the insertion point after the previous value if it is an instruction.
3408 // Note that the previous value may have been constant-folded so it is not
3409 // guaranteed to be an instruction in the vector loop. Also, if the previous
3410 // value is a phi node, we should insert after all the phi nodes to avoid
3411 // breaking basic block verification.
3412 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 3413 isa<PHINode>(PreviousLastPart)) 3414 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3415 else 3416 Builder.SetInsertPoint( 3417 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 3418 3419 // We will construct a vector for the recurrence by combining the values for 3420 // the current and previous iterations. This is the required shuffle mask. 3421 SmallVector<Constant *, 8> ShuffleMask(VF); 3422 ShuffleMask[0] = Builder.getInt32(VF - 1); 3423 for (unsigned I = 1; I < VF; ++I) 3424 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 3425 3426 // The vector from which to take the initial value for the current iteration 3427 // (actual or unrolled). Initially, this is the vector phi node. 3428 Value *Incoming = VecPhi; 3429 3430 // Shuffle the current and previous vector and update the vector parts. 3431 for (unsigned Part = 0; Part < UF; ++Part) { 3432 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3433 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3434 auto *Shuffle = 3435 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3436 ConstantVector::get(ShuffleMask)) 3437 : Incoming; 3438 PhiPart->replaceAllUsesWith(Shuffle); 3439 cast<Instruction>(PhiPart)->eraseFromParent(); 3440 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3441 Incoming = PreviousPart; 3442 } 3443 3444 // Fix the latch value of the new recurrence in the vector loop. 3445 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3446 3447 // Extract the last vector element in the middle block. This will be the 3448 // initial value for the recurrence when jumping to the scalar loop. 3449 auto *ExtractForScalar = Incoming; 3450 if (VF > 1) { 3451 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3452 ExtractForScalar = Builder.CreateExtractElement( 3453 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 3454 } 3455 // Extract the second last element in the middle block if the 3456 // Phi is used outside the loop. We need to extract the phi itself 3457 // and not the last element (the phi update in the current iteration). This 3458 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3459 // when the scalar loop is not run at all. 3460 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3461 if (VF > 1) 3462 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3463 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 3464 // When loop is unrolled without vectorizing, initialize 3465 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 3466 // `Incoming`. This is analogous to the vectorized case above: extracting the 3467 // second last element when VF > 1. 3468 else if (UF > 1) 3469 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 3470 3471 // Fix the initial value of the original recurrence in the scalar loop. 3472 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3473 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3474 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3475 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3476 Start->addIncoming(Incoming, BB); 3477 } 3478 3479 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 3480 Phi->setName("scalar.recur"); 3481 3482 // Finally, fix users of the recurrence outside the loop. 
The users will need
3483 // either the last value of the scalar recurrence or the last value of the
3484 // vector recurrence we extracted in the middle block. Since the loop is in
3485 // LCSSA form, we just need to find all the phi nodes for the original scalar
3486 // recurrence in the exit block, and then add an edge for the middle block.
3487 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3488 if (LCSSAPhi.getIncomingValue(0) == Phi) {
3489 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3490 }
3491 }
3492 }
3493
3494 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3495 Constant *Zero = Builder.getInt32(0);
3496
3497 // Get its reduction variable descriptor.
3498 assert(Legal->isReductionVariable(Phi) &&
3499 "Unable to find the reduction variable");
3500 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3501
3502 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3503 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3504 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3505 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3506 RdxDesc.getMinMaxRecurrenceKind();
3507 setDebugLocFromInst(Builder, ReductionStartValue);
3508
3509 // We need to generate a reduction vector from the incoming scalar.
3510 // To do so, we need to generate the 'identity' vector and override
3511 // one of the elements with the incoming scalar reduction. We need
3512 // to do it in the vector-loop preheader.
3513 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3514
3515 // This is the vector-clone of the value that leaves the loop.
3516 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3517
3518 // Find the reduction identity value: zero for addition, or, and xor;
3519 // one for multiplication; -1 for and.
3520 Value *Identity;
3521 Value *VectorStart;
3522 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3523 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3524 // MinMax reductions have the start value as their identity.
3525 if (VF == 1) {
3526 VectorStart = Identity = ReductionStartValue;
3527 } else {
3528 VectorStart = Identity =
3529 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3530 }
3531 } else {
3532 // Handle other reduction kinds:
3533 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3534 RK, VecTy->getScalarType());
3535 if (VF == 1) {
3536 Identity = Iden;
3537 // This vector is the Identity vector where the first element is the
3538 // incoming scalar reduction.
3539 VectorStart = ReductionStartValue;
3540 } else {
3541 Identity = ConstantVector::getSplat(VF, Iden);
3542
3543 // This vector is the Identity vector where the first element is the
3544 // incoming scalar reduction.
3545 VectorStart =
3546 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3547 }
3548 }
3549
3550 // Fix the vector-loop phi.
3551
3552 // Reductions do not have to start at zero. They can start with
3553 // any loop-invariant value.
3554 BasicBlock *Latch = OrigLoop->getLoopLatch();
3555 Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3556 for (unsigned Part = 0; Part < UF; ++Part) {
3557 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
3558 Value *Val = getOrCreateVectorValue(LoopVal, Part);
3559 // Make sure to add the reduction start value only to the
3560 // first unroll part.
3561 Value *StartVal = (Part == 0) ?
VectorStart : Identity; 3562 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader); 3563 cast<PHINode>(VecRdxPhi) 3564 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3565 } 3566 3567 // Before each round, move the insertion point right between 3568 // the PHIs and the values we are going to write. 3569 // This allows us to write both PHINodes and the extractelement 3570 // instructions. 3571 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3572 3573 setDebugLocFromInst(Builder, LoopExitInst); 3574 3575 // If the vector reduction can be performed in a smaller type, we truncate 3576 // then extend the loop exit value to enable InstCombine to evaluate the 3577 // entire expression in the smaller type. 3578 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3579 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3580 Builder.SetInsertPoint( 3581 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3582 VectorParts RdxParts(UF); 3583 for (unsigned Part = 0; Part < UF; ++Part) { 3584 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3585 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3586 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3587 : Builder.CreateZExt(Trunc, VecTy); 3588 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3589 UI != RdxParts[Part]->user_end();) 3590 if (*UI != Trunc) { 3591 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3592 RdxParts[Part] = Extnd; 3593 } else { 3594 ++UI; 3595 } 3596 } 3597 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3598 for (unsigned Part = 0; Part < UF; ++Part) { 3599 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3600 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3601 } 3602 } 3603 3604 // Reduce all of the unrolled parts into a single vector. 3605 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3606 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3607 setDebugLocFromInst(Builder, ReducedPartRdx); 3608 for (unsigned Part = 1; Part < UF; ++Part) { 3609 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3610 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3611 // Floating point operations had to be 'fast' to enable the reduction. 3612 ReducedPartRdx = addFastMathFlag( 3613 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3614 ReducedPartRdx, "bin.rdx")); 3615 else 3616 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3617 RdxPart); 3618 } 3619 3620 if (VF > 1) { 3621 bool NoNaN = Legal->hasFunNoNaNAttr(); 3622 ReducedPartRdx = 3623 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3624 // If the reduction can be performed in a smaller type, we need to extend 3625 // the reduction to the wider type before we branch to the original loop. 3626 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3627 ReducedPartRdx = 3628 RdxDesc.isSigned() 3629 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3630 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3631 } 3632 3633 // Create a phi node that merges control-flow from the backedge-taken check 3634 // block and the middle block. 
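  // As an illustrative sketch (block names depend on which bypass checks
  // were emitted), for an integer add reduction this merge phi looks like:
  //   scalar.ph:
  //     %bc.merge.rdx = phi i32 [ %start, %vector.memcheck ],
  //                             [ %rdx, %middle.block ]
  // i.e. the original start value when the vector loop was bypassed, and the
  // reduced value computed above when arriving from the middle block.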
3635 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3636 LoopScalarPreHeader->getTerminator()); 3637 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3638 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3639 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3640 3641 // Now, we need to fix the users of the reduction variable 3642 // inside and outside of the scalar remainder loop. 3643 // We know that the loop is in LCSSA form. We need to update the 3644 // PHI nodes in the exit blocks. 3645 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3646 // All PHINodes need to have a single entry edge, or two if 3647 // we already fixed them. 3648 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3649 3650 // We found a reduction value exit-PHI. Update it with the 3651 // incoming bypass edge. 3652 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3653 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3654 } // end of the LCSSA phi scan. 3655 3656 // Fix the scalar loop reduction variable with the incoming reduction sum 3657 // from the vector body and from the backedge value. 3658 int IncomingEdgeBlockIdx = 3659 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3660 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3661 // Pick the other block. 3662 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3663 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3664 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3665 } 3666 3667 void InnerLoopVectorizer::fixLCSSAPHIs() { 3668 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3669 if (LCSSAPhi.getNumIncomingValues() == 1) { 3670 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3671 // Non-instruction incoming values will have only one value. 3672 unsigned LastLane = 0; 3673 if (isa<Instruction>(IncomingValue)) 3674 LastLane = Cost->isUniformAfterVectorization( 3675 cast<Instruction>(IncomingValue), VF) 3676 ? 0 3677 : VF - 1; 3678 // Can be a loop invariant incoming value or the last scalar value to be 3679 // extracted from the vectorized loop. 3680 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3681 Value *lastIncomingValue = 3682 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 3683 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 3684 } 3685 } 3686 } 3687 3688 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3689 // The basic block and loop containing the predicated instruction. 3690 auto *PredBB = PredInst->getParent(); 3691 auto *VectorLoop = LI->getLoopFor(PredBB); 3692 3693 // Initialize a worklist with the operands of the predicated instruction. 3694 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3695 3696 // Holds instructions that we need to analyze again. An instruction may be 3697 // reanalyzed if we don't yet know if we can sink it or not. 3698 SmallVector<Instruction *, 8> InstsToReanalyze; 3699 3700 // Returns true if a given use occurs in the predicated block. Phi nodes use 3701 // their operands in their corresponding predecessor blocks. 
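  // For example (illustrative): given
  //   %phi = phi i32 [ %v, %pred.block ], [ 0, %other.block ]
  // the use of %v is attributed to %pred.block rather than to the block
  // containing the phi, so the definition of %v may still be sunk into
  // %pred.block.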
3702 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
3703 auto *I = cast<Instruction>(U.getUser());
3704 BasicBlock *BB = I->getParent();
3705 if (auto *Phi = dyn_cast<PHINode>(I))
3706 BB = Phi->getIncomingBlock(
3707 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3708 return BB == PredBB;
3709 };
3710
3711 // Iteratively sink the scalarized operands of the predicated instruction
3712 // into the block we created for it. When an instruction is sunk, its
3713 // operands are then added to the worklist. The algorithm ends when one pass
3714 // through the worklist doesn't sink a single instruction.
3715 bool Changed;
3716 do {
3717 // Add the instructions that need to be reanalyzed to the worklist, and
3718 // reset the changed indicator.
3719 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
3720 InstsToReanalyze.clear();
3721 Changed = false;
3722
3723 while (!Worklist.empty()) {
3724 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
3725
3726 // We can't sink an instruction if it is a phi node, is already in the
3727 // predicated block, is not in the loop, or may have side effects.
3728 if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
3729 !VectorLoop->contains(I) || I->mayHaveSideEffects())
3730 continue;
3731
3732 // It's legal to sink the instruction if all its uses occur in the
3733 // predicated block. Otherwise, there's nothing to do yet, and we may
3734 // need to reanalyze the instruction.
3735 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
3736 InstsToReanalyze.push_back(I);
3737 continue;
3738 }
3739
3740 // Move the instruction to the beginning of the predicated block, and add
3741 // its operands to the worklist.
3742 I->moveBefore(&*PredBB->getFirstInsertionPt());
3743 Worklist.insert(I->op_begin(), I->op_end());
3744
3745 // The sinking may have enabled other instructions to be sunk, so we will
3746 // need to iterate.
3747 Changed = true;
3748 }
3749 } while (Changed);
3750 }
3751
3752 void InnerLoopVectorizer::fixNonInductionPHIs() {
3753 for (PHINode *OrigPhi : OrigPHIsToFix) {
3754 PHINode *NewPhi =
3755 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
3756 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();
3757
3758 SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
3759 predecessors(OrigPhi->getParent()));
3760 SmallVector<BasicBlock *, 2> VectorBBPredecessors(
3761 predecessors(NewPhi->getParent()));
3762 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
3763 "Scalar and Vector BB should have the same number of predecessors");
3764
3765 // The insertion point in Builder may be invalidated by the time we get
3766 // here. Force the Builder insertion point to something valid so that we do
3767 // not run into issues during insertion point restore in
3768 // getOrCreateVectorValue calls below.
3769 Builder.SetInsertPoint(NewPhi);
3770
3771 // The predecessor order is preserved and we can rely on the mapping between
3772 // scalar and vector block predecessors.
3773 for (unsigned i = 0; i < NumIncomingValues; ++i) {
3774 BasicBlock *NewPredBB = VectorBBPredecessors[i];
3775
3776 // When looking up the new scalar/vector values to fix up, use the incoming
3777 // values from the original phi.
3778 Value *ScIncV = 3779 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 3780 3781 // Scalar incoming value may need a broadcast 3782 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 3783 NewPhi->addIncoming(NewIncV, NewPredBB); 3784 } 3785 } 3786 } 3787 3788 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 3789 unsigned VF) { 3790 PHINode *P = cast<PHINode>(PN); 3791 if (EnableVPlanNativePath) { 3792 // Currently we enter here in the VPlan-native path for non-induction 3793 // PHIs where all control flow is uniform. We simply widen these PHIs. 3794 // Create a vector phi with no operands - the vector phi operands will be 3795 // set at the end of vector code generation. 3796 Type *VecTy = 3797 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3798 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 3799 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 3800 OrigPHIsToFix.push_back(P); 3801 3802 return; 3803 } 3804 3805 assert(PN->getParent() == OrigLoop->getHeader() && 3806 "Non-header phis should have been handled elsewhere"); 3807 3808 // In order to support recurrences we need to be able to vectorize Phi nodes. 3809 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3810 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 3811 // this value when we vectorize all of the instructions that use the PHI. 3812 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 3813 for (unsigned Part = 0; Part < UF; ++Part) { 3814 // This is phase one of vectorizing PHIs. 3815 Type *VecTy = 3816 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3817 Value *EntryPart = PHINode::Create( 3818 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 3819 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 3820 } 3821 return; 3822 } 3823 3824 setDebugLocFromInst(Builder, P); 3825 3826 // This PHINode must be an induction variable. 3827 // Make sure that we know about it. 3828 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 3829 3830 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3831 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3832 3833 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3834 // which can be found from the original scalar operations. 3835 switch (II.getKind()) { 3836 case InductionDescriptor::IK_NoInduction: 3837 llvm_unreachable("Unknown induction"); 3838 case InductionDescriptor::IK_IntInduction: 3839 case InductionDescriptor::IK_FpInduction: 3840 llvm_unreachable("Integer/fp induction is handled elsewhere."); 3841 case InductionDescriptor::IK_PtrInduction: { 3842 // Handle the pointer induction variable case. 3843 assert(P->getType()->isPointerTy() && "Unexpected type."); 3844 // This is the normalized GEP that starts counting at zero. 3845 Value *PtrInd = Induction; 3846 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 3847 // Determine the number of scalars we need to generate for each unroll 3848 // iteration. If the instruction is uniform, we only need to generate the 3849 // first lane. Otherwise, we generate all VF values. 3850 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 3851 // These are the scalar results. Notice that we don't generate vector GEPs 3852 // because scalar GEPs result in better code. 
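    // For example (illustrative, assuming VF = 4, UF = 2 and a non-uniform
    // pointer induction): eight scalar indices PtrInd + 0 .. PtrInd + 7 are
    // formed below (Lane + Part * VF), and each one is run through
    // emitTransformedIndex to produce the corresponding scalar "next.gep"
    // value.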
3853 for (unsigned Part = 0; Part < UF; ++Part) { 3854 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 3855 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 3856 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3857 Value *SclrGep = 3858 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 3859 SclrGep->setName("next.gep"); 3860 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 3861 } 3862 } 3863 return; 3864 } 3865 } 3866 } 3867 3868 /// A helper function for checking whether an integer division-related 3869 /// instruction may divide by zero (in which case it must be predicated if 3870 /// executed conditionally in the scalar code). 3871 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 3872 /// Non-zero divisors that are non compile-time constants will not be 3873 /// converted into multiplication, so we will still end up scalarizing 3874 /// the division, but can do so w/o predication. 3875 static bool mayDivideByZero(Instruction &I) { 3876 assert((I.getOpcode() == Instruction::UDiv || 3877 I.getOpcode() == Instruction::SDiv || 3878 I.getOpcode() == Instruction::URem || 3879 I.getOpcode() == Instruction::SRem) && 3880 "Unexpected instruction"); 3881 Value *Divisor = I.getOperand(1); 3882 auto *CInt = dyn_cast<ConstantInt>(Divisor); 3883 return !CInt || CInt->isZero(); 3884 } 3885 3886 void InnerLoopVectorizer::widenInstruction(Instruction &I) { 3887 switch (I.getOpcode()) { 3888 case Instruction::Br: 3889 case Instruction::PHI: 3890 llvm_unreachable("This instruction is handled by a different recipe."); 3891 case Instruction::GetElementPtr: { 3892 // Construct a vector GEP by widening the operands of the scalar GEP as 3893 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 3894 // results in a vector of pointers when at least one operand of the GEP 3895 // is vector-typed. Thus, to keep the representation compact, we only use 3896 // vector-typed operands for loop-varying values. 3897 auto *GEP = cast<GetElementPtrInst>(&I); 3898 3899 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 3900 // If we are vectorizing, but the GEP has only loop-invariant operands, 3901 // the GEP we build (by only using vector-typed operands for 3902 // loop-varying values) would be a scalar pointer. Thus, to ensure we 3903 // produce a vector of pointers, we need to either arbitrarily pick an 3904 // operand to broadcast, or broadcast a clone of the original GEP. 3905 // Here, we broadcast a clone of the original. 3906 // 3907 // TODO: If at some point we decide to scalarize instructions having 3908 // loop-invariant operands, this special case will no longer be 3909 // required. We would add the scalarization decision to 3910 // collectLoopScalars() and teach getVectorValue() to broadcast 3911 // the lane-zero scalar value. 3912 auto *Clone = Builder.Insert(GEP->clone()); 3913 for (unsigned Part = 0; Part < UF; ++Part) { 3914 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 3915 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 3916 addMetadata(EntryPart, GEP); 3917 } 3918 } else { 3919 // If the GEP has at least one loop-varying operand, we are sure to 3920 // produce a vector of pointers. But if we are only unrolling, we want 3921 // to produce a scalar GEP for each unroll part. Thus, the GEP we 3922 // produce with the code below will be scalar (if VF == 1) or vector 3923 // (otherwise). 
Note that for the unroll-only case, we still maintain 3924 // values in the vector mapping with initVector, as we do for other 3925 // instructions. 3926 for (unsigned Part = 0; Part < UF; ++Part) { 3927 // The pointer operand of the new GEP. If it's loop-invariant, we 3928 // won't broadcast it. 3929 auto *Ptr = 3930 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 3931 ? GEP->getPointerOperand() 3932 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 3933 3934 // Collect all the indices for the new GEP. If any index is 3935 // loop-invariant, we won't broadcast it. 3936 SmallVector<Value *, 4> Indices; 3937 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 3938 if (OrigLoop->isLoopInvariant(U.get())) 3939 Indices.push_back(U.get()); 3940 else 3941 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 3942 } 3943 3944 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 3945 // but it should be a vector, otherwise. 3946 auto *NewGEP = 3947 GEP->isInBounds() 3948 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 3949 Indices) 3950 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 3951 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 3952 "NewGEP is not a pointer vector"); 3953 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 3954 addMetadata(NewGEP, GEP); 3955 } 3956 } 3957 3958 break; 3959 } 3960 case Instruction::UDiv: 3961 case Instruction::SDiv: 3962 case Instruction::SRem: 3963 case Instruction::URem: 3964 case Instruction::Add: 3965 case Instruction::FAdd: 3966 case Instruction::Sub: 3967 case Instruction::FSub: 3968 case Instruction::Mul: 3969 case Instruction::FMul: 3970 case Instruction::FDiv: 3971 case Instruction::FRem: 3972 case Instruction::Shl: 3973 case Instruction::LShr: 3974 case Instruction::AShr: 3975 case Instruction::And: 3976 case Instruction::Or: 3977 case Instruction::Xor: { 3978 // Just widen binops. 3979 auto *BinOp = cast<BinaryOperator>(&I); 3980 setDebugLocFromInst(Builder, BinOp); 3981 3982 for (unsigned Part = 0; Part < UF; ++Part) { 3983 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 3984 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 3985 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 3986 3987 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 3988 VecOp->copyIRFlags(BinOp); 3989 3990 // Use this vector value for all users of the original instruction. 3991 VectorLoopValueMap.setVectorValue(&I, Part, V); 3992 addMetadata(V, BinOp); 3993 } 3994 3995 break; 3996 } 3997 case Instruction::Select: { 3998 // Widen selects. 3999 // If the selector is loop invariant we can create a select 4000 // instruction with a scalar condition. Otherwise, use vector-select. 4001 auto *SE = PSE.getSE(); 4002 bool InvariantCond = 4003 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4004 setDebugLocFromInst(Builder, &I); 4005 4006 // The condition can be loop invariant but still defined inside the 4007 // loop. This means that we can't just use the original 'cond' value. 4008 // We have to take the 'vectorized' value and pick the first lane. 4009 // Instcombine will make this a no-op. 
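    // Illustrative shorthand (not generated verbatim): for an invariant
    // condition the widened select uses the first lane of the condition,
    //   %sel = select i1 %cond.lane0, <4 x i32> %a.vec, <4 x i32> %b.vec
    // whereas a loop-varying condition yields a per-lane vector select,
    //   %sel = select <4 x i1> %cond.vec, <4 x i32> %a.vec, <4 x i32> %b.vec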
4010 4011 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 4012 4013 for (unsigned Part = 0; Part < UF; ++Part) { 4014 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4015 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4016 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4017 Value *Sel = 4018 Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1); 4019 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4020 addMetadata(Sel, &I); 4021 } 4022 4023 break; 4024 } 4025 4026 case Instruction::ICmp: 4027 case Instruction::FCmp: { 4028 // Widen compares. Generate vector compares. 4029 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4030 auto *Cmp = dyn_cast<CmpInst>(&I); 4031 setDebugLocFromInst(Builder, Cmp); 4032 for (unsigned Part = 0; Part < UF; ++Part) { 4033 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4034 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4035 Value *C = nullptr; 4036 if (FCmp) { 4037 // Propagate fast math flags. 4038 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4039 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4040 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4041 } else { 4042 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4043 } 4044 VectorLoopValueMap.setVectorValue(&I, Part, C); 4045 addMetadata(C, &I); 4046 } 4047 4048 break; 4049 } 4050 4051 case Instruction::ZExt: 4052 case Instruction::SExt: 4053 case Instruction::FPToUI: 4054 case Instruction::FPToSI: 4055 case Instruction::FPExt: 4056 case Instruction::PtrToInt: 4057 case Instruction::IntToPtr: 4058 case Instruction::SIToFP: 4059 case Instruction::UIToFP: 4060 case Instruction::Trunc: 4061 case Instruction::FPTrunc: 4062 case Instruction::BitCast: { 4063 auto *CI = dyn_cast<CastInst>(&I); 4064 setDebugLocFromInst(Builder, CI); 4065 4066 /// Vectorize casts. 4067 Type *DestTy = 4068 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4069 4070 for (unsigned Part = 0; Part < UF; ++Part) { 4071 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4072 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4073 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4074 addMetadata(Cast, &I); 4075 } 4076 break; 4077 } 4078 4079 case Instruction::Call: { 4080 // Ignore dbg intrinsics. 4081 if (isa<DbgInfoIntrinsic>(I)) 4082 break; 4083 setDebugLocFromInst(Builder, &I); 4084 4085 Module *M = I.getParent()->getParent()->getParent(); 4086 auto *CI = cast<CallInst>(&I); 4087 4088 StringRef FnName = CI->getCalledFunction()->getName(); 4089 Function *F = CI->getCalledFunction(); 4090 Type *RetTy = ToVectorTy(CI->getType(), VF); 4091 SmallVector<Type *, 4> Tys; 4092 for (Value *ArgOperand : CI->arg_operands()) 4093 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4094 4095 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4096 4097 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4098 // version of the instruction. 4099 // Is it beneficial to perform intrinsic call compared to lib call? 
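    // For example (illustrative numbers only; real costs come from TTI): if
    // the intrinsic cost at VF = 4 is 18 and the vector library-call cost is
    // 24, the intrinsic form is chosen. A call that would have to be
    // scalarized and has no cheaper intrinsic form is expected to have been
    // handled by a scalarization decision already; the assert below documents
    // that.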
4100 bool NeedToScalarize; 4101 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4102 bool UseVectorIntrinsic = 4103 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4104 assert((UseVectorIntrinsic || !NeedToScalarize) && 4105 "Instruction should be scalarized elsewhere."); 4106 4107 for (unsigned Part = 0; Part < UF; ++Part) { 4108 SmallVector<Value *, 4> Args; 4109 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4110 Value *Arg = CI->getArgOperand(i); 4111 // Some intrinsics have a scalar argument - don't replace it with a 4112 // vector. 4113 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4114 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4115 Args.push_back(Arg); 4116 } 4117 4118 Function *VectorF; 4119 if (UseVectorIntrinsic) { 4120 // Use vector version of the intrinsic. 4121 Type *TysForDecl[] = {CI->getType()}; 4122 if (VF > 1) 4123 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4124 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4125 } else { 4126 // Use vector version of the library call. 4127 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4128 assert(!VFnName.empty() && "Vector function name is empty."); 4129 VectorF = M->getFunction(VFnName); 4130 if (!VectorF) { 4131 // Generate a declaration 4132 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4133 VectorF = 4134 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4135 VectorF->copyAttributesFrom(F); 4136 } 4137 } 4138 assert(VectorF && "Can't create vector function."); 4139 4140 SmallVector<OperandBundleDef, 1> OpBundles; 4141 CI->getOperandBundlesAsDefs(OpBundles); 4142 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4143 4144 if (isa<FPMathOperator>(V)) 4145 V->copyFastMathFlags(CI); 4146 4147 VectorLoopValueMap.setVectorValue(&I, Part, V); 4148 addMetadata(V, &I); 4149 } 4150 4151 break; 4152 } 4153 4154 default: 4155 // This instruction is not vectorized by simple widening. 4156 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4157 llvm_unreachable("Unhandled instruction!"); 4158 } // end of switch. 4159 } 4160 4161 void InnerLoopVectorizer::updateAnalysis() { 4162 // Forget the original basic block. 4163 PSE.getSE()->forgetLoop(OrigLoop); 4164 4165 // DT is not kept up-to-date for outer loop vectorization 4166 if (EnableVPlanNativePath) 4167 return; 4168 4169 // Update the dominator tree information. 4170 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4171 "Entry does not dominate exit."); 4172 4173 DT->addNewBlock(LoopMiddleBlock, 4174 LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4175 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4176 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4177 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4178 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 4179 } 4180 4181 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4182 // We should not collect Scalars more than once per VF. Right now, this 4183 // function is called from collectUniformsAndScalars(), which already does 4184 // this check. Collecting Scalars for VF=1 does not make any sense. 
4185 assert(VF >= 2 && Scalars.find(VF) == Scalars.end() && 4186 "This function should not be visited twice for the same VF"); 4187 4188 SmallSetVector<Instruction *, 8> Worklist; 4189 4190 // These sets are used to seed the analysis with pointers used by memory 4191 // accesses that will remain scalar. 4192 SmallSetVector<Instruction *, 8> ScalarPtrs; 4193 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4194 4195 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4196 // The pointer operands of loads and stores will be scalar as long as the 4197 // memory access is not a gather or scatter operation. The value operand of a 4198 // store will remain scalar if the store is scalarized. 4199 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4200 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4201 assert(WideningDecision != CM_Unknown && 4202 "Widening decision should be ready at this moment"); 4203 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4204 if (Ptr == Store->getValueOperand()) 4205 return WideningDecision == CM_Scalarize; 4206 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4207 "Ptr is neither a value or pointer operand"); 4208 return WideningDecision != CM_GatherScatter; 4209 }; 4210 4211 // A helper that returns true if the given value is a bitcast or 4212 // getelementptr instruction contained in the loop. 4213 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4214 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4215 isa<GetElementPtrInst>(V)) && 4216 !TheLoop->isLoopInvariant(V); 4217 }; 4218 4219 // A helper that evaluates a memory access's use of a pointer. If the use 4220 // will be a scalar use, and the pointer is only used by memory accesses, we 4221 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4222 // PossibleNonScalarPtrs. 4223 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4224 // We only care about bitcast and getelementptr instructions contained in 4225 // the loop. 4226 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4227 return; 4228 4229 // If the pointer has already been identified as scalar (e.g., if it was 4230 // also identified as uniform), there's nothing to do. 4231 auto *I = cast<Instruction>(Ptr); 4232 if (Worklist.count(I)) 4233 return; 4234 4235 // If the use of the pointer will be a scalar use, and all users of the 4236 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4237 // place the pointer in PossibleNonScalarPtrs. 4238 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4239 return isa<LoadInst>(U) || isa<StoreInst>(U); 4240 })) 4241 ScalarPtrs.insert(I); 4242 else 4243 PossibleNonScalarPtrs.insert(I); 4244 }; 4245 4246 // We seed the scalars analysis with three classes of instructions: (1) 4247 // instructions marked uniform-after-vectorization, (2) bitcast and 4248 // getelementptr instructions used by memory accesses requiring a scalar use, 4249 // and (3) pointer induction variables and their update instructions (we 4250 // currently only scalarize these). 4251 // 4252 // (1) Add to the worklist all instructions that have been identified as 4253 // uniform-after-vectorization. 4254 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4255 4256 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4257 // memory accesses requiring a scalar use. 
The pointer operands of loads and 4258 // stores will be scalar as long as the memory accesses is not a gather or 4259 // scatter operation. The value operand of a store will remain scalar if the 4260 // store is scalarized. 4261 for (auto *BB : TheLoop->blocks()) 4262 for (auto &I : *BB) { 4263 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4264 evaluatePtrUse(Load, Load->getPointerOperand()); 4265 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4266 evaluatePtrUse(Store, Store->getPointerOperand()); 4267 evaluatePtrUse(Store, Store->getValueOperand()); 4268 } 4269 } 4270 for (auto *I : ScalarPtrs) 4271 if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) { 4272 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4273 Worklist.insert(I); 4274 } 4275 4276 // (3) Add to the worklist all pointer induction variables and their update 4277 // instructions. 4278 // 4279 // TODO: Once we are able to vectorize pointer induction variables we should 4280 // no longer insert them into the worklist here. 4281 auto *Latch = TheLoop->getLoopLatch(); 4282 for (auto &Induction : *Legal->getInductionVars()) { 4283 auto *Ind = Induction.first; 4284 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4285 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4286 continue; 4287 Worklist.insert(Ind); 4288 Worklist.insert(IndUpdate); 4289 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4290 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4291 << "\n"); 4292 } 4293 4294 // Insert the forced scalars. 4295 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4296 // induction variable when the PHI user is scalarized. 4297 auto ForcedScalar = ForcedScalars.find(VF); 4298 if (ForcedScalar != ForcedScalars.end()) 4299 for (auto *I : ForcedScalar->second) 4300 Worklist.insert(I); 4301 4302 // Expand the worklist by looking through any bitcasts and getelementptr 4303 // instructions we've already identified as scalar. This is similar to the 4304 // expansion step in collectLoopUniforms(); however, here we're only 4305 // expanding to include additional bitcasts and getelementptr instructions. 4306 unsigned Idx = 0; 4307 while (Idx != Worklist.size()) { 4308 Instruction *Dst = Worklist[Idx++]; 4309 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4310 continue; 4311 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4312 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4313 auto *J = cast<Instruction>(U); 4314 return !TheLoop->contains(J) || Worklist.count(J) || 4315 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4316 isScalarUse(J, Src)); 4317 })) { 4318 Worklist.insert(Src); 4319 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4320 } 4321 } 4322 4323 // An induction variable will remain scalar if all users of the induction 4324 // variable and induction variable update remain scalar. 4325 for (auto &Induction : *Legal->getInductionVars()) { 4326 auto *Ind = Induction.first; 4327 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4328 4329 // We already considered pointer induction variables, so there's no reason 4330 // to look at their users again. 4331 // 4332 // TODO: Once we are able to vectorize pointer induction variables we 4333 // should no longer skip over them here. 
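// For example (illustrative): if the only in-loop users of a non-pointer IV
// and of its update are getelementptrs that were already added to Worklist
// above (because their memory accesses are scalarized), then the IV and its
// update are added to Worklist as well by the checks below.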
4334 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4335 continue; 4336 4337 // Determine if all users of the induction variable are scalar after 4338 // vectorization. 4339 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4340 auto *I = cast<Instruction>(U); 4341 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4342 }); 4343 if (!ScalarInd) 4344 continue; 4345 4346 // Determine if all users of the induction variable update instruction are 4347 // scalar after vectorization. 4348 auto ScalarIndUpdate = 4349 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4350 auto *I = cast<Instruction>(U); 4351 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4352 }); 4353 if (!ScalarIndUpdate) 4354 continue; 4355 4356 // The induction variable and its update instruction will remain scalar. 4357 Worklist.insert(Ind); 4358 Worklist.insert(IndUpdate); 4359 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4360 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4361 << "\n"); 4362 } 4363 4364 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4365 } 4366 4367 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4368 if (!blockNeedsPredication(I->getParent())) 4369 return false; 4370 switch(I->getOpcode()) { 4371 default: 4372 break; 4373 case Instruction::Load: 4374 case Instruction::Store: { 4375 if (!Legal->isMaskRequired(I)) 4376 return false; 4377 auto *Ptr = getLoadStorePointerOperand(I); 4378 auto *Ty = getMemInstValueType(I); 4379 // We have already decided how to vectorize this instruction, get that 4380 // result. 4381 if (VF > 1) { 4382 InstWidening WideningDecision = getWideningDecision(I, VF); 4383 assert(WideningDecision != CM_Unknown && 4384 "Widening decision should be ready at this moment"); 4385 return WideningDecision == CM_Scalarize; 4386 } 4387 return isa<LoadInst>(I) ? 4388 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 4389 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 4390 } 4391 case Instruction::UDiv: 4392 case Instruction::SDiv: 4393 case Instruction::SRem: 4394 case Instruction::URem: 4395 return mayDivideByZero(*I); 4396 } 4397 return false; 4398 } 4399 4400 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4401 unsigned VF) { 4402 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4403 assert(getWideningDecision(I, VF) == CM_Unknown && 4404 "Decision should not be set yet."); 4405 auto *Group = getInterleavedAccessGroup(I); 4406 assert(Group && "Must have a group."); 4407 4408 // Check if masking is required. 4409 // A Group may need masking for one of two reasons: it resides in a block that 4410 // needs predication, or it was decided to use masking to deal with gaps. 4411 bool PredicatedAccessRequiresMasking = 4412 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4413 bool AccessWithGapsRequiresMasking = 4414 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 4415 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4416 return true; 4417 4418 // If masked interleaving is required, we expect that the user/target had 4419 // enabled it, because otherwise it either wouldn't have been created or 4420 // it should have been invalidated by the CostModel. 
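// As an illustrative example of the gap case above: a load group {A[3*i],
// A[3*i+1]} with factor 3 is missing member 2, so the wide load covering the
// last vector iteration may touch memory the scalar loop never accesses
// (possibly past the end of the allocation); without a scalar epilogue such a
// group can only be executed with its gaps masked.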
4421 assert(useMaskedInterleavedAccesses(TTI) && 4422 "Masked interleave-groups for predicated accesses are not enabled."); 4423 4424 auto *Ty = getMemInstValueType(I); 4425 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty) 4426 : TTI.isLegalMaskedStore(Ty); 4427 } 4428 4429 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4430 unsigned VF) { 4431 // Get and ensure we have a valid memory instruction. 4432 LoadInst *LI = dyn_cast<LoadInst>(I); 4433 StoreInst *SI = dyn_cast<StoreInst>(I); 4434 assert((LI || SI) && "Invalid memory instruction"); 4435 4436 auto *Ptr = getLoadStorePointerOperand(I); 4437 4438 // In order to be widened, the pointer should be consecutive, first of all. 4439 if (!Legal->isConsecutivePtr(Ptr)) 4440 return false; 4441 4442 // If the instruction is a store located in a predicated block, it will be 4443 // scalarized. 4444 if (isScalarWithPredication(I)) 4445 return false; 4446 4447 // If the instruction's allocated size doesn't equal its type size, it 4448 // requires padding and will be scalarized. 4449 auto &DL = I->getModule()->getDataLayout(); 4450 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4451 if (hasIrregularType(ScalarTy, DL, VF)) 4452 return false; 4453 4454 return true; 4455 } 4456 4457 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4458 // We should not collect Uniforms more than once per VF. Right now, 4459 // this function is called from collectUniformsAndScalars(), which 4460 // already does this check. Collecting Uniforms for VF=1 does not make any 4461 // sense. 4462 4463 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4464 "This function should not be visited twice for the same VF"); 4465 4466 // Visit the list of Uniforms. If we don't find any uniform value, we won't 4467 // analyze it again; Uniforms.count(VF) will still return 1. 4468 Uniforms[VF].clear(); 4469 4470 // We now know that the loop is vectorizable! 4471 // Collect instructions inside the loop that will remain uniform after 4472 // vectorization. 4473 4474 // Global values, params and instructions outside of the current loop are out 4475 // of scope. 4476 auto isOutOfScope = [&](Value *V) -> bool { 4477 Instruction *I = dyn_cast<Instruction>(V); 4478 return (!I || !TheLoop->contains(I)); 4479 }; 4480 4481 SetVector<Instruction *> Worklist; 4482 BasicBlock *Latch = TheLoop->getLoopLatch(); 4483 4484 // Start with the conditional branch. If the branch condition is an 4485 // instruction contained in the loop that is only used by the branch, it is 4486 // uniform. 4487 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4488 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 4489 Worklist.insert(Cmp); 4490 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 4491 } 4492 4493 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4494 // are pointers that are treated like consecutive pointers during 4495 // vectorization. The pointer operands of interleaved accesses are an 4496 // example. 4497 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4498 4499 // Holds pointer operands of instructions that are possibly non-uniform.
4500 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4501 4502 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4503 InstWidening WideningDecision = getWideningDecision(I, VF); 4504 assert(WideningDecision != CM_Unknown && 4505 "Widening decision should be ready at this moment"); 4506 4507 return (WideningDecision == CM_Widen || 4508 WideningDecision == CM_Widen_Reverse || 4509 WideningDecision == CM_Interleave); 4510 }; 4511 // Iterate over the instructions in the loop, and collect all 4512 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4513 // that a consecutive-like pointer operand will be scalarized, we collect it 4514 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4515 // getelementptr instruction can be used by both vectorized and scalarized 4516 // memory instructions. For example, if a loop loads and stores from the same 4517 // location, but the store is conditional, the store will be scalarized, and 4518 // the getelementptr won't remain uniform. 4519 for (auto *BB : TheLoop->blocks()) 4520 for (auto &I : *BB) { 4521 // If there's no pointer operand, there's nothing to do. 4522 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4523 if (!Ptr) 4524 continue; 4525 4526 // True if all users of Ptr are memory accesses that have Ptr as their 4527 // pointer operand. 4528 auto UsersAreMemAccesses = 4529 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4530 return getLoadStorePointerOperand(U) == Ptr; 4531 }); 4532 4533 // Ensure the memory instruction will not be scalarized or used by 4534 // gather/scatter, making its pointer operand non-uniform. If the pointer 4535 // operand is used by any instruction other than a memory access, we 4536 // conservatively assume the pointer operand may be non-uniform. 4537 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4538 PossibleNonUniformPtrs.insert(Ptr); 4539 4540 // If the memory instruction will be vectorized and its pointer operand 4541 // is consecutive-like, or interleaving - the pointer operand should 4542 // remain uniform. 4543 else 4544 ConsecutiveLikePtrs.insert(Ptr); 4545 } 4546 4547 // Add to the Worklist all consecutive and consecutive-like pointers that 4548 // aren't also identified as possibly non-uniform. 4549 for (auto *V : ConsecutiveLikePtrs) 4550 if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) { 4551 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 4552 Worklist.insert(V); 4553 } 4554 4555 // Expand Worklist in topological order: whenever a new instruction 4556 // is added , its users should be already inside Worklist. It ensures 4557 // a uniform instruction will only be used by uniform instructions. 4558 unsigned idx = 0; 4559 while (idx != Worklist.size()) { 4560 Instruction *I = Worklist[idx++]; 4561 4562 for (auto OV : I->operand_values()) { 4563 // isOutOfScope operands cannot be uniform instructions. 4564 if (isOutOfScope(OV)) 4565 continue; 4566 // First order recurrence Phi's should typically be considered 4567 // non-uniform. 4568 auto *OP = dyn_cast<PHINode>(OV); 4569 if (OP && Legal->isFirstOrderRecurrence(OP)) 4570 continue; 4571 // If all the users of the operand are uniform, then add the 4572 // operand into the uniform worklist. 
4573 auto *OI = cast<Instruction>(OV); 4574 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4575 auto *J = cast<Instruction>(U); 4576 return Worklist.count(J) || 4577 (OI == getLoadStorePointerOperand(J) && 4578 isUniformDecision(J, VF)); 4579 })) { 4580 Worklist.insert(OI); 4581 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4582 } 4583 } 4584 } 4585 4586 // Returns true if Ptr is the pointer operand of a memory access instruction 4587 // I, and I is known to not require scalarization. 4588 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4589 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4590 }; 4591 4592 // For an instruction to be added into Worklist above, all its users inside 4593 // the loop should also be in Worklist. However, this condition cannot be 4594 // true for phi nodes that form a cyclic dependence. We must process phi 4595 // nodes separately. An induction variable will remain uniform if all users 4596 // of the induction variable and induction variable update remain uniform. 4597 // The code below handles both pointer and non-pointer induction variables. 4598 for (auto &Induction : *Legal->getInductionVars()) { 4599 auto *Ind = Induction.first; 4600 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4601 4602 // Determine if all users of the induction variable are uniform after 4603 // vectorization. 4604 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4605 auto *I = cast<Instruction>(U); 4606 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4607 isVectorizedMemAccessUse(I, Ind); 4608 }); 4609 if (!UniformInd) 4610 continue; 4611 4612 // Determine if all users of the induction variable update instruction are 4613 // uniform after vectorization. 4614 auto UniformIndUpdate = 4615 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4616 auto *I = cast<Instruction>(U); 4617 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4618 isVectorizedMemAccessUse(I, IndUpdate); 4619 }); 4620 if (!UniformIndUpdate) 4621 continue; 4622 4623 // The induction variable and its update instruction will remain uniform. 4624 Worklist.insert(Ind); 4625 Worklist.insert(IndUpdate); 4626 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 4627 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate 4628 << "\n"); 4629 } 4630 4631 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4632 } 4633 4634 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { 4635 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4636 // TODO: It may be useful to do this, since the check is still likely to be 4637 // dynamically uniform if the target can skip. 4638 LLVM_DEBUG( 4639 dbgs() << "LV: Not inserting runtime ptr check for divergent target"); 4640 4641 ORE->emit( 4642 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 4643 << "runtime pointer checks needed. Not enabled for divergent target"); 4644 4645 return None; 4646 } 4647 4648 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4649 if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize. 4650 return computeFeasibleMaxVF(OptForSize, TC); 4651 4652 if (Legal->getRuntimePointerChecking()->Need) { 4653 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4654 << "runtime pointer checks needed.
Enable vectorization of this " 4655 "loop with '#pragma clang loop vectorize(enable)' when " 4656 "compiling with -Os/-Oz"); 4657 LLVM_DEBUG( 4658 dbgs() 4659 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 4660 return None; 4661 } 4662 4663 if (!PSE.getUnionPredicate().getPredicates().empty()) { 4664 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4665 << "runtime SCEV checks needed. Enable vectorization of this " 4666 "loop with '#pragma clang loop vectorize(enable)' when " 4667 "compiling with -Os/-Oz"); 4668 LLVM_DEBUG( 4669 dbgs() 4670 << "LV: Aborting. Runtime SCEV check is required with -Os/-Oz.\n"); 4671 return None; 4672 } 4673 4674 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4675 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4676 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4677 << "runtime stride == 1 checks needed. Enable vectorization of " 4678 "this loop with '#pragma clang loop vectorize(enable)' when " 4679 "compiling with -Os/-Oz"); 4680 LLVM_DEBUG( 4681 dbgs() 4682 << "LV: Aborting. Runtime stride check is required with -Os/-Oz.\n"); 4683 return None; 4684 } 4685 4686 // If we optimize the program for size, avoid creating the tail loop. 4687 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4688 4689 if (TC == 1) { 4690 ORE->emit(createMissedAnalysis("SingleIterationLoop") 4691 << "loop trip count is one, irrelevant for vectorization"); 4692 LLVM_DEBUG(dbgs() << "LV: Aborting, single iteration (non) loop.\n"); 4693 return None; 4694 } 4695 4696 // Record that scalar epilogue is not allowed. 4697 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 4698 4699 IsScalarEpilogueAllowed = !OptForSize; 4700 4701 // We don't create an epilogue when optimizing for size. 4702 // Invalidate interleave groups that require an epilogue if we can't mask 4703 // the interleave-group. 4704 if (!useMaskedInterleavedAccesses(TTI)) 4705 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 4706 4707 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 4708 4709 if (TC > 0 && TC % MaxVF == 0) { 4710 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 4711 return MaxVF; 4712 } 4713 4714 // If we don't know the precise trip count, or if the trip count that we 4715 // found modulo the vectorization factor is not zero, try to fold the tail 4716 // by masking. 4717 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 4718 if (Legal->canFoldTailByMasking()) { 4719 FoldTailByMasking = true; 4720 return MaxVF; 4721 } 4722 4723 if (TC == 0) { 4724 ORE->emit( 4725 createMissedAnalysis("UnknownLoopCountComplexCFG") 4726 << "unable to calculate the loop count due to complex control flow"); 4727 return None; 4728 } 4729 4730 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 4731 << "cannot optimize for size and vectorize at the same time. " 4732 "Enable vectorization of this loop with '#pragma clang loop " 4733 "vectorize(enable)' when compiling with -Os/-Oz"); 4734 return None; 4735 } 4736 4737 unsigned 4738 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 4739 unsigned ConstTripCount) { 4740 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4741 unsigned SmallestType, WidestType; 4742 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4743 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 4744 4745 // Get the maximum safe dependence distance in bits computed by LAA. 
4746 // It is computed as MaxVF * sizeOf(type) * 8, where type is taken from 4747 // the memory access that is most restrictive (involved in the smallest 4748 // dependence distance). 4749 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 4750 4751 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 4752 4753 unsigned MaxVectorSize = WidestRegister / WidestType; 4754 4755 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4756 << " / " << WidestType << " bits.\n"); 4757 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 4758 << WidestRegister << " bits.\n"); 4759 4760 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 4761 " into one vector!"); 4762 if (MaxVectorSize == 0) { 4763 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 4764 MaxVectorSize = 1; 4765 return MaxVectorSize; 4766 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 4767 isPowerOf2_32(ConstTripCount)) { 4768 // We need to clamp the VF to be the ConstTripCount. There is no point in 4769 // choosing a higher viable VF as done in the loop below. 4770 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 4771 << ConstTripCount << "\n"); 4772 MaxVectorSize = ConstTripCount; 4773 return MaxVectorSize; 4774 } 4775 4776 unsigned MaxVF = MaxVectorSize; 4777 if (TTI.shouldMaximizeVectorBandwidth(OptForSize) || 4778 (MaximizeBandwidth && !OptForSize)) { 4779 // Collect all viable vectorization factors larger than the default MaxVF 4780 // (i.e. MaxVectorSize). 4781 SmallVector<unsigned, 8> VFs; 4782 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 4783 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 4784 VFs.push_back(VS); 4785 4786 // For each VF calculate its register usage. 4787 auto RUs = calculateRegisterUsage(VFs); 4788 4789 // Select the largest VF which doesn't require more registers than existing 4790 // ones. 4791 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 4792 for (int i = RUs.size() - 1; i >= 0; --i) { 4793 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 4794 MaxVF = VFs[i]; 4795 break; 4796 } 4797 } 4798 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 4799 if (MaxVF < MinVF) { 4800 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 4801 << ") with target's minimum: " << MinVF << '\n'); 4802 MaxVF = MinVF; 4803 } 4804 } 4805 } 4806 return MaxVF; 4807 } 4808 4809 VectorizationFactor 4810 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 4811 float Cost = expectedCost(1).first; 4812 const float ScalarCost = Cost; 4813 unsigned Width = 1; 4814 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 4815 4816 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 4817 if (ForceVectorization && MaxVF > 1) { 4818 // Ignore scalar width, because the user explicitly wants vectorization. 4819 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 4820 // evaluation. 4821 Cost = std::numeric_limits<float>::max(); 4822 } 4823 4824 for (unsigned i = 2; i <= MaxVF; i *= 2) { 4825 // Notice that the vector loop needs to be executed fewer times, so 4826 // we need to divide the cost of the vector loop by the width of 4827 // the vector elements.
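// Illustrative numbers only: if the scalar loop costs 8 per iteration and the
// VF=4 body costs 20 per vector iteration, the per-lane cost is 20 / 4 = 5 < 8,
// so VF=4 would be preferred over the scalar loop.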
4828 VectorizationCostTy C = expectedCost(i); 4829 float VectorCost = C.first / (float)i; 4830 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 4831 << " costs: " << (int)VectorCost << ".\n"); 4832 if (!C.second && !ForceVectorization) { 4833 LLVM_DEBUG( 4834 dbgs() << "LV: Not considering vector loop of width " << i 4835 << " because it will not generate any vector instructions.\n"); 4836 continue; 4837 } 4838 if (VectorCost < Cost) { 4839 Cost = VectorCost; 4840 Width = i; 4841 } 4842 } 4843 4844 if (!EnableCondStoresVectorization && NumPredStores) { 4845 ORE->emit(createMissedAnalysis("ConditionalStore") 4846 << "store that is conditionally executed prevents vectorization"); 4847 LLVM_DEBUG( 4848 dbgs() << "LV: No vectorization. There are conditional stores.\n"); 4849 Width = 1; 4850 Cost = ScalarCost; 4851 } 4852 4853 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 4854 << "LV: Vectorization seems to be not beneficial, " 4855 << "but was forced by a user.\n"); 4856 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 4857 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 4858 return Factor; 4859 } 4860 4861 std::pair<unsigned, unsigned> 4862 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 4863 unsigned MinWidth = -1U; 4864 unsigned MaxWidth = 8; 4865 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 4866 4867 // For each block. 4868 for (BasicBlock *BB : TheLoop->blocks()) { 4869 // For each instruction in the loop. 4870 for (Instruction &I : BB->instructionsWithoutDebug()) { 4871 Type *T = I.getType(); 4872 4873 // Skip ignored values. 4874 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end()) 4875 continue; 4876 4877 // Only examine Loads, Stores and PHINodes. 4878 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 4879 continue; 4880 4881 // Examine PHI nodes that are reduction variables. Update the type to 4882 // account for the recurrence type. 4883 if (auto *PN = dyn_cast<PHINode>(&I)) { 4884 if (!Legal->isReductionVariable(PN)) 4885 continue; 4886 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 4887 T = RdxDesc.getRecurrenceType(); 4888 } 4889 4890 // Examine the stored values. 4891 if (auto *ST = dyn_cast<StoreInst>(&I)) 4892 T = ST->getValueOperand()->getType(); 4893 4894 // Ignore loaded pointer types and stored pointer types that are not 4895 // vectorizable. 4896 // 4897 // FIXME: The check here attempts to predict whether a load or store will 4898 // be vectorized. We only know this for certain after a VF has 4899 // been selected. Here, we assume that if an access can be 4900 // vectorized, it will be. We should also look at extending this 4901 // optimization to non-pointer types. 4902 // 4903 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 4904 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 4905 continue; 4906 4907 MinWidth = std::min(MinWidth, 4908 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4909 MaxWidth = std::max(MaxWidth, 4910 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4911 } 4912 } 4913 4914 return {MinWidth, MaxWidth}; 4915 } 4916 4917 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 4918 unsigned VF, 4919 unsigned LoopCost) { 4920 // -- The interleave heuristics -- 4921 // We interleave the loop in order to expose ILP and reduce the loop overhead. 4922 // There are many micro-architectural considerations that we can't predict 4923 // at this level. 
For example, frontend pressure (on decode or fetch) due to 4924 // code size, or the number and capabilities of the execution ports. 4925 // 4926 // We use the following heuristics to select the interleave count: 4927 // 1. If the code has reductions, then we interleave to break the cross 4928 // iteration dependency. 4929 // 2. If the loop is really small, then we interleave to reduce the loop 4930 // overhead. 4931 // 3. We don't interleave if we think that we will spill registers to memory 4932 // due to the increased register pressure. 4933 4934 // When we optimize for size, we don't interleave. 4935 if (OptForSize) 4936 return 1; 4937 4938 // We used the distance for the interleave count. 4939 if (Legal->getMaxSafeDepDistBytes() != -1U) 4940 return 1; 4941 4942 // Do not interleave loops with a relatively small trip count. 4943 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4944 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 4945 return 1; 4946 4947 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 4948 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 4949 << " registers\n"); 4950 4951 if (VF == 1) { 4952 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 4953 TargetNumRegisters = ForceTargetNumScalarRegs; 4954 } else { 4955 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 4956 TargetNumRegisters = ForceTargetNumVectorRegs; 4957 } 4958 4959 RegisterUsage R = calculateRegisterUsage({VF})[0]; 4960 // We divide by these constants so assume that we have at least one 4961 // instruction that uses at least one register. 4962 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 4963 4964 // We calculate the interleave count using the following formula. 4965 // Subtract the number of loop invariants from the number of available 4966 // registers. These registers are used by all of the interleaved instances. 4967 // Next, divide the remaining registers by the number of registers that is 4968 // required by the loop, in order to estimate how many parallel instances 4969 // fit without causing spills. All of this is rounded down if necessary to be 4970 // a power of two. We want power of two interleave count to simplify any 4971 // addressing operations or alignment considerations. 4972 // We also want power of two interleave counts to ensure that the induction 4973 // variable of the vector loop wraps to zero, when tail is folded by masking; 4974 // this currently happens when OptForSize, in which case IC is set to 1 above. 4975 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 4976 R.MaxLocalUsers); 4977 4978 // Don't count the induction variable as interleaved. 4979 if (EnableIndVarRegisterHeur) 4980 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 4981 std::max(1U, (R.MaxLocalUsers - 1))); 4982 4983 // Clamp the interleave ranges to reasonable counts. 4984 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 4985 4986 // Check if the user has overridden the max. 4987 if (VF == 1) { 4988 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 4989 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 4990 } else { 4991 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 4992 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 4993 } 4994 4995 // If we did not calculate the cost for VF (because the user selected the VF) 4996 // then we calculate the cost of VF here. 
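// Illustrative numbers for the basic register-pressure formula above: with 16
// target registers, 2 loop-invariant values, and a peak of 5 values live
// inside the loop, IC = PowerOf2Floor((16 - 2) / 5) = 2.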
4997 if (LoopCost == 0) 4998 LoopCost = expectedCost(VF).first; 4999 5000 // Clamp the calculated IC to be between 1 and the maximum interleave count 5001 // that the target allows. 5002 if (IC > MaxInterleaveCount) 5003 IC = MaxInterleaveCount; 5004 else if (IC < 1) 5005 IC = 1; 5006 5007 // Interleave if we vectorized this loop and there is a reduction that could 5008 // benefit from interleaving. 5009 if (VF > 1 && !Legal->getReductionVars()->empty()) { 5010 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5011 return IC; 5012 } 5013 5014 // Note that if we've already vectorized the loop we will have done the 5015 // runtime check and so interleaving won't require further checks. 5016 bool InterleavingRequiresRuntimePointerCheck = 5017 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5018 5019 // We want to interleave small loops in order to reduce the loop overhead and 5020 // potentially expose ILP opportunities. 5021 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5022 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5023 // We assume that the cost overhead is 1 and we use the cost model 5024 // to estimate the cost of the loop and interleave until the cost of the 5025 // loop overhead is about 5% of the cost of the loop. 5026 unsigned SmallIC = 5027 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5028 5029 // Interleave until store/load ports (estimated by max interleave count) are 5030 // saturated. 5031 unsigned NumStores = Legal->getNumStores(); 5032 unsigned NumLoads = Legal->getNumLoads(); 5033 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5034 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5035 5036 // If we have a scalar reduction (vector reductions are already dealt with 5037 // by this point), we can increase the critical path length if the loop 5038 // we're interleaving is inside another loop. Limit the interleave count (by 5039 // default to 2) so the critical path only gets increased by one reduction operation. 5040 if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) { 5041 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5042 SmallIC = std::min(SmallIC, F); 5043 StoresIC = std::min(StoresIC, F); 5044 LoadsIC = std::min(LoadsIC, F); 5045 } 5046 5047 if (EnableLoadStoreRuntimeInterleave && 5048 std::max(StoresIC, LoadsIC) > SmallIC) { 5049 LLVM_DEBUG( 5050 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5051 return std::max(StoresIC, LoadsIC); 5052 } 5053 5054 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5055 return SmallIC; 5056 } 5057 5058 // Interleave if this is a large loop (small loops are already dealt with by 5059 // this point) that could benefit from interleaving. 5060 bool HasReductions = !Legal->getReductionVars()->empty(); 5061 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5062 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5063 return IC; 5064 } 5065 5066 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5067 return 1; 5068 } 5069 5070 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5071 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5072 // This function calculates the register usage by measuring the highest number 5073 // of values that are alive at a single location. Obviously, this is a very 5074 // rough estimation. We scan the loop in topological order and 5075 // assign a number to each instruction.
We use RPO to ensure that defs are 5076 // met before their users. We assume that each instruction that has in-loop 5077 // users starts an interval. We record every time that an in-loop value is 5078 // used, so we have a list of the first and last occurrences of each 5079 // instruction. Next, we transpose this data structure into a multi map that 5080 // holds the list of intervals that *end* at a specific location. This multi 5081 // map allows us to perform a linear search. We scan the instructions linearly 5082 // and record each time that a new interval starts, by placing it in a set. 5083 // If we find this value in the multi-map then we remove it from the set. 5084 // The max register usage is the maximum size of the set. 5085 // We also search for instructions that are defined outside the loop, but are 5086 // used inside the loop. We need this number separately from the max-interval 5087 // usage number because when we unroll, loop-invariant values do not take 5088 // more register. 5089 LoopBlocksDFS DFS(TheLoop); 5090 DFS.perform(LI); 5091 5092 RegisterUsage RU; 5093 5094 // Each 'key' in the map opens a new interval. The values 5095 // of the map are the index of the 'last seen' usage of the 5096 // instruction that is the key. 5097 using IntervalMap = DenseMap<Instruction *, unsigned>; 5098 5099 // Maps instruction to its index. 5100 SmallVector<Instruction *, 64> IdxToInstr; 5101 // Marks the end of each interval. 5102 IntervalMap EndPoint; 5103 // Saves the list of instruction indices that are used in the loop. 5104 SmallPtrSet<Instruction *, 8> Ends; 5105 // Saves the list of values that are used in the loop but are 5106 // defined outside the loop, such as arguments and constants. 5107 SmallPtrSet<Value *, 8> LoopInvariants; 5108 5109 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5110 for (Instruction &I : BB->instructionsWithoutDebug()) { 5111 IdxToInstr.push_back(&I); 5112 5113 // Save the end location of each USE. 5114 for (Value *U : I.operands()) { 5115 auto *Instr = dyn_cast<Instruction>(U); 5116 5117 // Ignore non-instruction values such as arguments, constants, etc. 5118 if (!Instr) 5119 continue; 5120 5121 // If this instruction is outside the loop then record it and continue. 5122 if (!TheLoop->contains(Instr)) { 5123 LoopInvariants.insert(Instr); 5124 continue; 5125 } 5126 5127 // Overwrite previous end points. 5128 EndPoint[Instr] = IdxToInstr.size(); 5129 Ends.insert(Instr); 5130 } 5131 } 5132 } 5133 5134 // Saves the list of intervals that end with the index in 'key'. 5135 using InstrList = SmallVector<Instruction *, 2>; 5136 DenseMap<unsigned, InstrList> TransposeEnds; 5137 5138 // Transpose the EndPoints to a list of values that end at each index. 5139 for (auto &Interval : EndPoint) 5140 TransposeEnds[Interval.second].push_back(Interval.first); 5141 5142 SmallPtrSet<Instruction *, 8> OpenIntervals; 5143 5144 // Get the size of the widest register. 5145 unsigned MaxSafeDepDist = -1U; 5146 if (Legal->getMaxSafeDepDistBytes() != -1U) 5147 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5148 unsigned WidestRegister = 5149 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5150 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5151 5152 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5153 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5154 5155 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5156 5157 // A lambda that gets the register usage for the given type and VF. 
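// For example (illustrative): with 128-bit vector registers, a value of
// scalar type i32 at VF=4 counts as max(1, 4 * 32 / 128) = 1 register, while
// an i64 value at VF=8 counts as 8 * 64 / 128 = 4 registers.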
5158 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5159 if (Ty->isTokenTy()) 5160 return 0U; 5161 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5162 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5163 }; 5164 5165 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5166 Instruction *I = IdxToInstr[i]; 5167 5168 // Remove all of the instructions that end at this location. 5169 InstrList &List = TransposeEnds[i]; 5170 for (Instruction *ToRemove : List) 5171 OpenIntervals.erase(ToRemove); 5172 5173 // Ignore instructions that are never used within the loop. 5174 if (Ends.find(I) == Ends.end()) 5175 continue; 5176 5177 // Skip ignored values. 5178 if (ValuesToIgnore.find(I) != ValuesToIgnore.end()) 5179 continue; 5180 5181 // For each VF find the maximum usage of registers. 5182 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5183 if (VFs[j] == 1) { 5184 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5185 continue; 5186 } 5187 collectUniformsAndScalars(VFs[j]); 5188 // Count the number of live intervals. 5189 unsigned RegUsage = 0; 5190 for (auto Inst : OpenIntervals) { 5191 // Skip ignored values for VF > 1. 5192 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() || 5193 isScalarAfterVectorization(Inst, VFs[j])) 5194 continue; 5195 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5196 } 5197 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5198 } 5199 5200 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5201 << OpenIntervals.size() << '\n'); 5202 5203 // Add the current instruction to the list of open intervals. 5204 OpenIntervals.insert(I); 5205 } 5206 5207 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5208 unsigned Invariant = 0; 5209 if (VFs[i] == 1) 5210 Invariant = LoopInvariants.size(); 5211 else { 5212 for (auto Inst : LoopInvariants) 5213 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5214 } 5215 5216 LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5217 LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5218 LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant 5219 << '\n'); 5220 5221 RU.LoopInvariantRegs = Invariant; 5222 RU.MaxLocalUsers = MaxUsages[i]; 5223 RUs[i] = RU; 5224 } 5225 5226 return RUs; 5227 } 5228 5229 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5230 // TODO: Cost model for emulated masked load/store is completely 5231 // broken. This hack guides the cost model to use an artificially 5232 // high enough value to practically disable vectorization with such 5233 // operations, except where previously deployed legality hack allowed 5234 // using very low cost values. This is to avoid regressions coming simply 5235 // from moving "masked load/store" check from legality to cost model. 5236 // Masked Load/Gather emulation was previously never allowed. 5237 // Limited number of Masked Store/Scatter emulation was allowed. 5238 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5239 return isa<LoadInst>(I) || 5240 (isa<StoreInst>(I) && 5241 NumPredStores > NumberOfStoresToPredicate); 5242 } 5243 5244 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5245 // If we aren't vectorizing the loop, or if we've already collected the 5246 // instructions to scalarize, there's nothing to do. Collection may already 5247 // have occurred if we have a user-selected VF and are now computing the 5248 // expected cost for interleaving. 
5249 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5250 return; 5251 5252 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 5253 // not profitable to scalarize any instructions, the presence of VF in the 5254 // map will indicate that we've analyzed it already. 5255 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5256 5257 // Find all the instructions that are scalar with predication in the loop and 5258 // determine if it would be better to not if-convert the blocks they are in. 5259 // If so, we also record the instructions to scalarize. 5260 for (BasicBlock *BB : TheLoop->blocks()) { 5261 if (!blockNeedsPredication(BB)) 5262 continue; 5263 for (Instruction &I : *BB) 5264 if (isScalarWithPredication(&I)) { 5265 ScalarCostsTy ScalarCosts; 5266 // Do not apply discount logic if hacked cost is needed 5267 // for emulated masked memrefs. 5268 if (!useEmulatedMaskMemRefHack(&I) && 5269 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5270 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5271 // Remember that BB will remain after vectorization. 5272 PredicatedBBsAfterVectorization.insert(BB); 5273 } 5274 } 5275 } 5276 5277 int LoopVectorizationCostModel::computePredInstDiscount( 5278 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5279 unsigned VF) { 5280 assert(!isUniformAfterVectorization(PredInst, VF) && 5281 "Instruction marked uniform-after-vectorization will be predicated"); 5282 5283 // Initialize the discount to zero, meaning that the scalar version and the 5284 // vector version cost the same. 5285 int Discount = 0; 5286 5287 // Holds instructions to analyze. The instructions we visit are mapped in 5288 // ScalarCosts. Those instructions are the ones that would be scalarized if 5289 // we find that the scalar version costs less. 5290 SmallVector<Instruction *, 8> Worklist; 5291 5292 // Returns true if the given instruction can be scalarized. 5293 auto canBeScalarized = [&](Instruction *I) -> bool { 5294 // We only attempt to scalarize instructions forming a single-use chain 5295 // from the original predicated block that would otherwise be vectorized. 5296 // Although not strictly necessary, we give up on instructions we know will 5297 // already be scalar to avoid traversing chains that are unlikely to be 5298 // beneficial. 5299 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5300 isScalarAfterVectorization(I, VF)) 5301 return false; 5302 5303 // If the instruction is scalar with predication, it will be analyzed 5304 // separately. We ignore it within the context of PredInst. 5305 if (isScalarWithPredication(I)) 5306 return false; 5307 5308 // If any of the instruction's operands are uniform after vectorization, 5309 // the instruction cannot be scalarized. This prevents, for example, a 5310 // masked load from being scalarized. 5311 // 5312 // We assume we will only emit a value for lane zero of an instruction 5313 // marked uniform after vectorization, rather than VF identical values. 5314 // Thus, if we scalarize an instruction that uses a uniform, we would 5315 // create uses of values corresponding to the lanes we aren't emitting code 5316 // for. This behavior can be changed by allowing getScalarValue to clone 5317 // the lane zero values for uniforms rather than asserting. 
5318 for (Use &U : I->operands()) 5319 if (auto *J = dyn_cast<Instruction>(U.get())) 5320 if (isUniformAfterVectorization(J, VF)) 5321 return false; 5322 5323 // Otherwise, we can scalarize the instruction. 5324 return true; 5325 }; 5326 5327 // Returns true if an operand that cannot be scalarized must be extracted 5328 // from a vector. We will account for this scalarization overhead below. Note 5329 // that the non-void predicated instructions are placed in their own blocks, 5330 // and their return values are inserted into vectors. Thus, an extract would 5331 // still be required. 5332 auto needsExtract = [&](Instruction *I) -> bool { 5333 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 5334 }; 5335 5336 // Compute the expected cost discount from scalarizing the entire expression 5337 // feeding the predicated instruction. We currently only consider expressions 5338 // that are single-use instruction chains. 5339 Worklist.push_back(PredInst); 5340 while (!Worklist.empty()) { 5341 Instruction *I = Worklist.pop_back_val(); 5342 5343 // If we've already analyzed the instruction, there's nothing to do. 5344 if (ScalarCosts.find(I) != ScalarCosts.end()) 5345 continue; 5346 5347 // Compute the cost of the vector instruction. Note that this cost already 5348 // includes the scalarization overhead of the predicated instruction. 5349 unsigned VectorCost = getInstructionCost(I, VF).first; 5350 5351 // Compute the cost of the scalarized instruction. This cost is the cost of 5352 // the instruction as if it wasn't if-converted and instead remained in the 5353 // predicated block. We will scale this cost by block probability after 5354 // computing the scalarization overhead. 5355 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5356 5357 // Compute the scalarization overhead of needed insertelement instructions 5358 // and phi nodes. 5359 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5360 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5361 true, false); 5362 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5363 } 5364 5365 // Compute the scalarization overhead of needed extractelement 5366 // instructions. For each of the instruction's operands, if the operand can 5367 // be scalarized, add it to the worklist; otherwise, account for the 5368 // overhead. 5369 for (Use &U : I->operands()) 5370 if (auto *J = dyn_cast<Instruction>(U.get())) { 5371 assert(VectorType::isValidElementType(J->getType()) && 5372 "Instruction has non-scalar type"); 5373 if (canBeScalarized(J)) 5374 Worklist.push_back(J); 5375 else if (needsExtract(J)) 5376 ScalarCost += TTI.getScalarizationOverhead( 5377 ToVectorTy(J->getType(),VF), false, true); 5378 } 5379 5380 // Scale the total scalar cost by block probability. 5381 ScalarCost /= getReciprocalPredBlockProb(); 5382 5383 // Compute the discount. A non-negative discount means the vector version 5384 // of the instruction costs more, and scalarizing would be beneficial. 5385 Discount += VectorCost - ScalarCost; 5386 ScalarCosts[I] = ScalarCost; 5387 } 5388 5389 return Discount; 5390 } 5391 5392 LoopVectorizationCostModel::VectorizationCostTy 5393 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5394 VectorizationCostTy Cost; 5395 5396 // For each block. 5397 for (BasicBlock *BB : TheLoop->blocks()) { 5398 VectorizationCostTy BlockCost; 5399 5400 // For each instruction in the old loop. 5401 for (Instruction &I : BB->instructionsWithoutDebug()) { 5402 // Skip ignored values. 
5403 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5404 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5405 continue; 5406 5407 VectorizationCostTy C = getInstructionCost(&I, VF); 5408 5409 // Check if we should override the cost. 5410 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5411 C.first = ForceTargetInstructionCost; 5412 5413 BlockCost.first += C.first; 5414 BlockCost.second |= C.second; 5415 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5416 << " for VF " << VF << " For instruction: " << I 5417 << '\n'); 5418 } 5419 5420 // If we are vectorizing a predicated block, it will have been 5421 // if-converted. This means that the block's instructions (aside from 5422 // stores and instructions that may divide by zero) will now be 5423 // unconditionally executed. For the scalar case, we may not always execute 5424 // the predicated block. Thus, scale the block's cost by the probability of 5425 // executing it. 5426 if (VF == 1 && blockNeedsPredication(BB)) 5427 BlockCost.first /= getReciprocalPredBlockProb(); 5428 5429 Cost.first += BlockCost.first; 5430 Cost.second |= BlockCost.second; 5431 } 5432 5433 return Cost; 5434 } 5435 5436 /// Gets Address Access SCEV after verifying that the access pattern 5437 /// is loop invariant except the induction variable dependence. 5438 /// 5439 /// This SCEV can be sent to the Target in order to estimate the address 5440 /// calculation cost. 5441 static const SCEV *getAddressAccessSCEV( 5442 Value *Ptr, 5443 LoopVectorizationLegality *Legal, 5444 PredicatedScalarEvolution &PSE, 5445 const Loop *TheLoop) { 5446 5447 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5448 if (!Gep) 5449 return nullptr; 5450 5451 // We are looking for a gep with all loop invariant indices except for one 5452 // which should be an induction variable. 5453 auto SE = PSE.getSE(); 5454 unsigned NumOperands = Gep->getNumOperands(); 5455 for (unsigned i = 1; i < NumOperands; ++i) { 5456 Value *Opd = Gep->getOperand(i); 5457 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5458 !Legal->isInductionVariable(Opd)) 5459 return nullptr; 5460 } 5461 5462 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5463 return PSE.getSCEV(Ptr); 5464 } 5465 5466 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5467 return Legal->hasStride(I->getOperand(0)) || 5468 Legal->hasStride(I->getOperand(1)); 5469 } 5470 5471 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5472 unsigned VF) { 5473 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5474 Type *ValTy = getMemInstValueType(I); 5475 auto SE = PSE.getSE(); 5476 5477 unsigned Alignment = getLoadStoreAlignment(I); 5478 unsigned AS = getLoadStoreAddressSpace(I); 5479 Value *Ptr = getLoadStorePointerOperand(I); 5480 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5481 5482 // Figure out whether the access is strided and get the stride value 5483 // if it's known in compile time 5484 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5485 5486 // Get the cost of the scalar memory instruction and address computation. 5487 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5488 5489 // Don't pass *I here, since it is scalar but will actually be part of a 5490 // vectorized loop where the user of it is a vectorized instruction. 
5491 Cost += VF * 5492 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 5493 AS); 5494 5495 // Get the overhead of the extractelement and insertelement instructions 5496 // we might create due to scalarization. 5497 Cost += getScalarizationOverhead(I, VF, TTI); 5498 5499 // If we have a predicated store, it may not be executed for each vector 5500 // lane. Scale the cost by the probability of executing the predicated 5501 // block. 5502 if (isPredicatedInst(I)) { 5503 Cost /= getReciprocalPredBlockProb(); 5504 5505 if (useEmulatedMaskMemRefHack(I)) 5506 // Artificially setting to a high enough value to practically disable 5507 // vectorization with such operations. 5508 Cost = 3000000; 5509 } 5510 5511 return Cost; 5512 } 5513 5514 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5515 unsigned VF) { 5516 Type *ValTy = getMemInstValueType(I); 5517 Type *VectorTy = ToVectorTy(ValTy, VF); 5518 unsigned Alignment = getLoadStoreAlignment(I); 5519 Value *Ptr = getLoadStorePointerOperand(I); 5520 unsigned AS = getLoadStoreAddressSpace(I); 5521 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5522 5523 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5524 "Stride should be 1 or -1 for consecutive memory access"); 5525 unsigned Cost = 0; 5526 if (Legal->isMaskRequired(I)) 5527 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 5528 else 5529 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5530 5531 bool Reverse = ConsecutiveStride < 0; 5532 if (Reverse) 5533 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5534 return Cost; 5535 } 5536 5537 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5538 unsigned VF) { 5539 Type *ValTy = getMemInstValueType(I); 5540 Type *VectorTy = ToVectorTy(ValTy, VF); 5541 unsigned Alignment = getLoadStoreAlignment(I); 5542 unsigned AS = getLoadStoreAddressSpace(I); 5543 if (isa<LoadInst>(I)) { 5544 return TTI.getAddressComputationCost(ValTy) + 5545 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5546 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5547 } 5548 StoreInst *SI = cast<StoreInst>(I); 5549 5550 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5551 return TTI.getAddressComputationCost(ValTy) + 5552 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) + 5553 (isLoopInvariantStoreValue ? 
0 : TTI.getVectorInstrCost( 5554 Instruction::ExtractElement, 5555 VectorTy, VF - 1)); 5556 } 5557 5558 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5559 unsigned VF) { 5560 Type *ValTy = getMemInstValueType(I); 5561 Type *VectorTy = ToVectorTy(ValTy, VF); 5562 unsigned Alignment = getLoadStoreAlignment(I); 5563 Value *Ptr = getLoadStorePointerOperand(I); 5564 5565 return TTI.getAddressComputationCost(VectorTy) + 5566 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5567 Legal->isMaskRequired(I), Alignment); 5568 } 5569 5570 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5571 unsigned VF) { 5572 Type *ValTy = getMemInstValueType(I); 5573 Type *VectorTy = ToVectorTy(ValTy, VF); 5574 unsigned AS = getLoadStoreAddressSpace(I); 5575 5576 auto Group = getInterleavedAccessGroup(I); 5577 assert(Group && "Fail to get an interleaved access group."); 5578 5579 unsigned InterleaveFactor = Group->getFactor(); 5580 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5581 5582 // Holds the indices of existing members in an interleaved load group. 5583 // An interleaved store group doesn't need this as it doesn't allow gaps. 5584 SmallVector<unsigned, 4> Indices; 5585 if (isa<LoadInst>(I)) { 5586 for (unsigned i = 0; i < InterleaveFactor; i++) 5587 if (Group->getMember(i)) 5588 Indices.push_back(i); 5589 } 5590 5591 // Calculate the cost of the whole interleaved group. 5592 bool UseMaskForGaps = 5593 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 5594 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5595 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5596 Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps); 5597 5598 if (Group->isReverse()) { 5599 // TODO: Add support for reversed masked interleaved access. 5600 assert(!Legal->isMaskRequired(I) && 5601 "Reverse masked interleaved access not supported."); 5602 Cost += Group->getNumMembers() * 5603 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5604 } 5605 return Cost; 5606 } 5607 5608 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5609 unsigned VF) { 5610 // Calculate scalar cost only. Vectorization cost should be ready at this 5611 // moment. 5612 if (VF == 1) { 5613 Type *ValTy = getMemInstValueType(I); 5614 unsigned Alignment = getLoadStoreAlignment(I); 5615 unsigned AS = getLoadStoreAddressSpace(I); 5616 5617 return TTI.getAddressComputationCost(ValTy) + 5618 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5619 } 5620 return getWideningCost(I, VF); 5621 } 5622 5623 LoopVectorizationCostModel::VectorizationCostTy 5624 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5625 // If we know that this instruction will remain uniform, check the cost of 5626 // the scalar version. 5627 if (isUniformAfterVectorization(I, VF)) 5628 VF = 1; 5629 5630 if (VF > 1 && isProfitableToScalarize(I, VF)) 5631 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5632 5633 // Forced scalars do not have any scalarization overhead. 
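  // (Instructions land in ForcedScalars[VF] when setCostBasedWideningDecision()
  // below decides that address computations must stay scalar; their cost is
  // simply VF copies of the scalar cost, with no insert/extract overhead.)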
5634 auto ForcedScalar = ForcedScalars.find(VF); 5635 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5636 auto InstSet = ForcedScalar->second; 5637 if (InstSet.find(I) != InstSet.end()) 5638 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5639 } 5640 5641 Type *VectorTy; 5642 unsigned C = getInstructionCost(I, VF, VectorTy); 5643 5644 bool TypeNotScalarized = 5645 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5646 return VectorizationCostTy(C, TypeNotScalarized); 5647 } 5648 5649 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 5650 if (VF == 1) 5651 return; 5652 NumPredStores = 0; 5653 for (BasicBlock *BB : TheLoop->blocks()) { 5654 // For each instruction in the old loop. 5655 for (Instruction &I : *BB) { 5656 Value *Ptr = getLoadStorePointerOperand(&I); 5657 if (!Ptr) 5658 continue; 5659 5660 // TODO: We should generate better code and update the cost model for 5661 // predicated uniform stores. Today they are treated as any other 5662 // predicated store (see added test cases in 5663 // invariant-store-vectorization.ll). 5664 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 5665 NumPredStores++; 5666 5667 if (Legal->isUniform(Ptr) && 5668 // Conditional loads and stores should be scalarized and predicated. 5669 // isScalarWithPredication cannot be used here since masked 5670 // gather/scatters are not considered scalar with predication. 5671 !Legal->blockNeedsPredication(I.getParent())) { 5672 // TODO: Avoid replicating loads and stores instead of 5673 // relying on instcombine to remove them. 5674 // Load: Scalar load + broadcast 5675 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 5676 unsigned Cost = getUniformMemOpCost(&I, VF); 5677 setWideningDecision(&I, VF, CM_Scalarize, Cost); 5678 continue; 5679 } 5680 5681 // We assume that widening is the best solution when possible. 5682 if (memoryInstructionCanBeWidened(&I, VF)) { 5683 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 5684 int ConsecutiveStride = 5685 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 5686 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5687 "Expected consecutive stride."); 5688 InstWidening Decision = 5689 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 5690 setWideningDecision(&I, VF, Decision, Cost); 5691 continue; 5692 } 5693 5694 // Choose between Interleaving, Gather/Scatter or Scalarization. 5695 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 5696 unsigned NumAccesses = 1; 5697 if (isAccessInterleaved(&I)) { 5698 auto Group = getInterleavedAccessGroup(&I); 5699 assert(Group && "Fail to get an interleaved access group."); 5700 5701 // Make one decision for the whole group. 5702 if (getWideningDecision(&I, VF) != CM_Unknown) 5703 continue; 5704 5705 NumAccesses = Group->getNumMembers(); 5706 if (interleavedAccessCanBeWidened(&I, VF)) 5707 InterleaveCost = getInterleaveGroupCost(&I, VF); 5708 } 5709 5710 unsigned GatherScatterCost = 5711 isLegalGatherOrScatter(&I) 5712 ? getGatherScatterCost(&I, VF) * NumAccesses 5713 : std::numeric_limits<unsigned>::max(); 5714 5715 unsigned ScalarizationCost = 5716 getMemInstScalarizationCost(&I, VF) * NumAccesses; 5717 5718 // Choose better solution for the current VF, 5719 // write down this decision and use it during vectorization. 
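        // Illustrative example with made-up costs: if InterleaveCost = 6,
        // GatherScatterCost = UINT_MAX and ScalarizationCost = 10, the access
        // is interleaved with cost 6. Note the asymmetry below: interleaving
        // wins ties against gather/scatter, while scalarization wins ties
        // against gather/scatter.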
5720 unsigned Cost; 5721 InstWidening Decision; 5722 if (InterleaveCost <= GatherScatterCost && 5723 InterleaveCost < ScalarizationCost) { 5724 Decision = CM_Interleave; 5725 Cost = InterleaveCost; 5726 } else if (GatherScatterCost < ScalarizationCost) { 5727 Decision = CM_GatherScatter; 5728 Cost = GatherScatterCost; 5729 } else { 5730 Decision = CM_Scalarize; 5731 Cost = ScalarizationCost; 5732 } 5733 // If the instructions belongs to an interleave group, the whole group 5734 // receives the same decision. The whole group receives the cost, but 5735 // the cost will actually be assigned to one instruction. 5736 if (auto Group = getInterleavedAccessGroup(&I)) 5737 setWideningDecision(Group, VF, Decision, Cost); 5738 else 5739 setWideningDecision(&I, VF, Decision, Cost); 5740 } 5741 } 5742 5743 // Make sure that any load of address and any other address computation 5744 // remains scalar unless there is gather/scatter support. This avoids 5745 // inevitable extracts into address registers, and also has the benefit of 5746 // activating LSR more, since that pass can't optimize vectorized 5747 // addresses. 5748 if (TTI.prefersVectorizedAddressing()) 5749 return; 5750 5751 // Start with all scalar pointer uses. 5752 SmallPtrSet<Instruction *, 8> AddrDefs; 5753 for (BasicBlock *BB : TheLoop->blocks()) 5754 for (Instruction &I : *BB) { 5755 Instruction *PtrDef = 5756 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 5757 if (PtrDef && TheLoop->contains(PtrDef) && 5758 getWideningDecision(&I, VF) != CM_GatherScatter) 5759 AddrDefs.insert(PtrDef); 5760 } 5761 5762 // Add all instructions used to generate the addresses. 5763 SmallVector<Instruction *, 4> Worklist; 5764 for (auto *I : AddrDefs) 5765 Worklist.push_back(I); 5766 while (!Worklist.empty()) { 5767 Instruction *I = Worklist.pop_back_val(); 5768 for (auto &Op : I->operands()) 5769 if (auto *InstOp = dyn_cast<Instruction>(Op)) 5770 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 5771 AddrDefs.insert(InstOp).second) 5772 Worklist.push_back(InstOp); 5773 } 5774 5775 for (auto *I : AddrDefs) { 5776 if (isa<LoadInst>(I)) { 5777 // Setting the desired widening decision should ideally be handled in 5778 // by cost functions, but since this involves the task of finding out 5779 // if the loaded register is involved in an address computation, it is 5780 // instead changed here when we know this is the case. 5781 InstWidening Decision = getWideningDecision(I, VF); 5782 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 5783 // Scalarize a widened load of address. 5784 setWideningDecision(I, VF, CM_Scalarize, 5785 (VF * getMemoryInstructionCost(I, 1))); 5786 else if (auto Group = getInterleavedAccessGroup(I)) { 5787 // Scalarize an interleave group of address loads. 5788 for (unsigned I = 0; I < Group->getFactor(); ++I) { 5789 if (Instruction *Member = Group->getMember(I)) 5790 setWideningDecision(Member, VF, CM_Scalarize, 5791 (VF * getMemoryInstructionCost(Member, 1))); 5792 } 5793 } 5794 } else 5795 // Make sure I gets scalarized and a cost estimate without 5796 // scalarization overhead. 5797 ForcedScalars[VF].insert(I); 5798 } 5799 } 5800 5801 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5802 unsigned VF, 5803 Type *&VectorTy) { 5804 Type *RetTy = I->getType(); 5805 if (canTruncateToMinimalBitwidth(I, VF)) 5806 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5807 VectorTy = isScalarAfterVectorization(I, VF) ? 
RetTy : ToVectorTy(RetTy, VF); 5808 auto SE = PSE.getSE(); 5809 5810 // TODO: We need to estimate the cost of intrinsic calls. 5811 switch (I->getOpcode()) { 5812 case Instruction::GetElementPtr: 5813 // We mark this instruction as zero-cost because the cost of GEPs in 5814 // vectorized code depends on whether the corresponding memory instruction 5815 // is scalarized or not. Therefore, we handle GEPs with the memory 5816 // instruction cost. 5817 return 0; 5818 case Instruction::Br: { 5819 // In cases of scalarized and predicated instructions, there will be VF 5820 // predicated blocks in the vectorized loop. Each branch around these 5821 // blocks requires also an extract of its vector compare i1 element. 5822 bool ScalarPredicatedBB = false; 5823 BranchInst *BI = cast<BranchInst>(I); 5824 if (VF > 1 && BI->isConditional() && 5825 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 5826 PredicatedBBsAfterVectorization.end() || 5827 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 5828 PredicatedBBsAfterVectorization.end())) 5829 ScalarPredicatedBB = true; 5830 5831 if (ScalarPredicatedBB) { 5832 // Return cost for branches around scalarized and predicated blocks. 5833 Type *Vec_i1Ty = 5834 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 5835 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 5836 (TTI.getCFInstrCost(Instruction::Br) * VF)); 5837 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 5838 // The back-edge branch will remain, as will all scalar branches. 5839 return TTI.getCFInstrCost(Instruction::Br); 5840 else 5841 // This branch will be eliminated by if-conversion. 5842 return 0; 5843 // Note: We currently assume zero cost for an unconditional branch inside 5844 // a predicated block since it will become a fall-through, although we 5845 // may decide in the future to call TTI for all branches. 5846 } 5847 case Instruction::PHI: { 5848 auto *Phi = cast<PHINode>(I); 5849 5850 // First-order recurrences are replaced by vector shuffles inside the loop. 5851 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 5852 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5853 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5854 VectorTy, VF - 1, VectorType::get(RetTy, 1)); 5855 5856 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 5857 // converted into select instructions. We require N - 1 selects per phi 5858 // node, where N is the number of incoming values. 5859 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 5860 return (Phi->getNumIncomingValues() - 1) * 5861 TTI.getCmpSelInstrCost( 5862 Instruction::Select, ToVectorTy(Phi->getType(), VF), 5863 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 5864 5865 return TTI.getCFInstrCost(Instruction::PHI); 5866 } 5867 case Instruction::UDiv: 5868 case Instruction::SDiv: 5869 case Instruction::URem: 5870 case Instruction::SRem: 5871 // If we have a predicated instruction, it may not be executed for each 5872 // vector lane. Get the scalarization cost and scale this amount by the 5873 // probability of executing the predicated block. If the instruction is not 5874 // predicated, we fall through to the next case. 5875 if (VF > 1 && isScalarWithPredication(I)) { 5876 unsigned Cost = 0; 5877 5878 // These instructions have a non-void type, so account for the phi nodes 5879 // that we will create. This cost is likely to be zero. 
The phi node 5880 // cost, if any, should be scaled by the block probability because it 5881 // models a copy at the end of each predicated block. 5882 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 5883 5884 // The cost of the non-predicated instruction. 5885 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 5886 5887 // The cost of insertelement and extractelement instructions needed for 5888 // scalarization. 5889 Cost += getScalarizationOverhead(I, VF, TTI); 5890 5891 // Scale the cost by the probability of executing the predicated blocks. 5892 // This assumes the predicated block for each vector lane is equally 5893 // likely. 5894 return Cost / getReciprocalPredBlockProb(); 5895 } 5896 LLVM_FALLTHROUGH; 5897 case Instruction::Add: 5898 case Instruction::FAdd: 5899 case Instruction::Sub: 5900 case Instruction::FSub: 5901 case Instruction::Mul: 5902 case Instruction::FMul: 5903 case Instruction::FDiv: 5904 case Instruction::FRem: 5905 case Instruction::Shl: 5906 case Instruction::LShr: 5907 case Instruction::AShr: 5908 case Instruction::And: 5909 case Instruction::Or: 5910 case Instruction::Xor: { 5911 // Since we will replace the stride by 1, the multiplication should go away. 5912 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 5913 return 0; 5914 // Certain instructions can be cheaper to vectorize if they have a constant 5915 // second vector operand. One example of this is shifts on x86. 5916 Value *Op2 = I->getOperand(1); 5917 TargetTransformInfo::OperandValueProperties Op2VP; 5918 TargetTransformInfo::OperandValueKind Op2VK = 5919 TTI.getOperandInfo(Op2, Op2VP); 5920 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 5921 Op2VK = TargetTransformInfo::OK_UniformValue; 5922 5923 SmallVector<const Value *, 4> Operands(I->operand_values()); 5924 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 5925 return N * TTI.getArithmeticInstrCost( 5926 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 5927 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands); 5928 } 5929 case Instruction::Select: { 5930 SelectInst *SI = cast<SelectInst>(I); 5931 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 5932 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 5933 Type *CondTy = SI->getCondition()->getType(); 5934 if (!ScalarCond) 5935 CondTy = VectorType::get(CondTy, VF); 5936 5937 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 5938 } 5939 case Instruction::ICmp: 5940 case Instruction::FCmp: { 5941 Type *ValTy = I->getOperand(0)->getType(); 5942 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 5943 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 5944 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 5945 VectorTy = ToVectorTy(ValTy, VF); 5946 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 5947 } 5948 case Instruction::Store: 5949 case Instruction::Load: { 5950 unsigned Width = VF; 5951 if (Width > 1) { 5952 InstWidening Decision = getWideningDecision(I, Width); 5953 assert(Decision != CM_Unknown && 5954 "CM decision should be taken at this point"); 5955 if (Decision == CM_Scalarize) 5956 Width = 1; 5957 } 5958 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 5959 return getMemoryInstructionCost(I, VF); 5960 } 5961 case Instruction::ZExt: 5962 case Instruction::SExt: 5963 case Instruction::FPToUI: 5964 case Instruction::FPToSI: 5965 case Instruction::FPExt: 5966 case Instruction::PtrToInt: 5967 case Instruction::IntToPtr: 5968 case Instruction::SIToFP: 5969 case Instruction::UIToFP: 5970 case Instruction::Trunc: 5971 case Instruction::FPTrunc: 5972 case Instruction::BitCast: { 5973 // We optimize the truncation of induction variables having constant 5974 // integer steps. The cost of these truncations is the same as the scalar 5975 // operation. 5976 if (isOptimizableIVTruncate(I, VF)) { 5977 auto *Trunc = cast<TruncInst>(I); 5978 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 5979 Trunc->getSrcTy(), Trunc); 5980 } 5981 5982 Type *SrcScalarTy = I->getOperand(0)->getType(); 5983 Type *SrcVecTy = 5984 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 5985 if (canTruncateToMinimalBitwidth(I, VF)) { 5986 // This cast is going to be shrunk. This may remove the cast or it might 5987 // turn it into slightly different cast. For example, if MinBW == 16, 5988 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 5989 // 5990 // Calculate the modified src and dest types. 5991 Type *MinVecTy = VectorTy; 5992 if (I->getOpcode() == Instruction::Trunc) { 5993 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 5994 VectorTy = 5995 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 5996 } else if (I->getOpcode() == Instruction::ZExt || 5997 I->getOpcode() == Instruction::SExt) { 5998 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 5999 VectorTy = 6000 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6001 } 6002 } 6003 6004 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6005 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 6006 } 6007 case Instruction::Call: { 6008 bool NeedToScalarize; 6009 CallInst *CI = cast<CallInst>(I); 6010 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6011 if (getVectorIntrinsicIDForCall(CI, TLI)) 6012 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6013 return CallCost; 6014 } 6015 default: 6016 // The cost of executing VF copies of the scalar instruction. This opcode 6017 // is unknown. Assume that it is the same as 'mul'. 6018 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6019 getScalarizationOverhead(I, VF, TTI); 6020 } // end of switch. 6021 } 6022 6023 char LoopVectorize::ID = 0; 6024 6025 static const char lv_name[] = "Loop Vectorization"; 6026 6027 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6028 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6029 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6030 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6031 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6032 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6033 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6034 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6035 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6036 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6037 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6038 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6039 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6040 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6041 6042 namespace llvm { 6043 6044 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6045 bool VectorizeOnlyWhenForced) { 6046 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6047 } 6048 6049 } // end namespace llvm 6050 6051 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6052 // Check if the pointer operand of a load or store instruction is 6053 // consecutive. 6054 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6055 return Legal->isConsecutivePtr(Ptr); 6056 return false; 6057 } 6058 6059 void LoopVectorizationCostModel::collectValuesToIgnore() { 6060 // Ignore ephemeral values. 6061 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6062 6063 // Ignore type-promoting instructions we identified during reduction 6064 // detection. 6065 for (auto &Reduction : *Legal->getReductionVars()) { 6066 RecurrenceDescriptor &RedDes = Reduction.second; 6067 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6068 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6069 } 6070 // Ignore type-casting instructions we identified during induction 6071 // detection. 6072 for (auto &Induction : *Legal->getInductionVars()) { 6073 InductionDescriptor &IndDes = Induction.second; 6074 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6075 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6076 } 6077 } 6078 6079 VectorizationFactor 6080 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize, 6081 unsigned UserVF) { 6082 // Width 1 means no vectorization, cost 0 means uncomputed cost. 6083 const VectorizationFactor NoVectorization = {1U, 0U}; 6084 6085 // Outer loop handling: They may require CFG and instruction level 6086 // transformations before even evaluating whether vectorization is profitable. 
6087 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6088 // the vectorization pipeline. 6089 if (!OrigLoop->empty()) { 6090 // TODO: If UserVF is not provided, we set UserVF to 4 for stress testing. 6091 // This won't be necessary when UserVF is not required in the VPlan-native 6092 // path. 6093 if (VPlanBuildStressTest && !UserVF) 6094 UserVF = 4; 6095 6096 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6097 assert(UserVF && "Expected UserVF for outer loop vectorization."); 6098 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6099 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6100 buildVPlans(UserVF, UserVF); 6101 6102 // For VPlan build stress testing, we bail out after VPlan construction. 6103 if (VPlanBuildStressTest) 6104 return NoVectorization; 6105 6106 return {UserVF, 0}; 6107 } 6108 6109 LLVM_DEBUG( 6110 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6111 "VPlan-native path.\n"); 6112 return NoVectorization; 6113 } 6114 6115 VectorizationFactor 6116 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { 6117 assert(OrigLoop->empty() && "Inner loop expected."); 6118 // Width 1 means no vectorization, cost 0 means uncomputed cost. 6119 const VectorizationFactor NoVectorization = {1U, 0U}; 6120 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 6121 if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize. 6122 return NoVectorization; 6123 6124 // Invalidate interleave groups if all blocks of loop will be predicated. 6125 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6126 !useMaskedInterleavedAccesses(*TTI)) { 6127 LLVM_DEBUG( 6128 dbgs() 6129 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6130 "which requires masked-interleaved support.\n"); 6131 CM.InterleaveInfo.reset(); 6132 } 6133 6134 if (UserVF) { 6135 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6136 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6137 // Collect the instructions (and their associated costs) that will be more 6138 // profitable to scalarize. 6139 CM.selectUserVectorizationFactor(UserVF); 6140 buildVPlansWithVPRecipes(UserVF, UserVF); 6141 LLVM_DEBUG(printPlans(dbgs())); 6142 return {UserVF, 0}; 6143 } 6144 6145 unsigned MaxVF = MaybeMaxVF.getValue(); 6146 assert(MaxVF != 0 && "MaxVF is zero."); 6147 6148 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6149 // Collect Uniform and Scalar instructions after vectorization with VF. 6150 CM.collectUniformsAndScalars(VF); 6151 6152 // Collect the instructions (and their associated costs) that will be more 6153 // profitable to scalarize. 6154 if (VF > 1) 6155 CM.collectInstsToScalarize(VF); 6156 } 6157 6158 buildVPlansWithVPRecipes(1, MaxVF); 6159 LLVM_DEBUG(printPlans(dbgs())); 6160 if (MaxVF == 1) 6161 return NoVectorization; 6162 6163 // Select the optimal vectorization factor. 
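  // For example, with MaxVF = 8 the candidates prepared above are VF = 1, 2, 4
  // and 8; selectVectorizationFactor picks among them using the cost model's
  // expected cost for each candidate.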
6164 return CM.selectVectorizationFactor(MaxVF); 6165 } 6166 6167 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6168 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6169 << '\n'); 6170 BestVF = VF; 6171 BestUF = UF; 6172 6173 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6174 return !Plan->hasVF(VF); 6175 }); 6176 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6177 } 6178 6179 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6180 DominatorTree *DT) { 6181 // Perform the actual loop transformation. 6182 6183 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6184 VPCallbackILV CallbackILV(ILV); 6185 6186 VPTransformState State{BestVF, BestUF, LI, 6187 DT, ILV.Builder, ILV.VectorLoopValueMap, 6188 &ILV, CallbackILV}; 6189 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6190 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6191 6192 //===------------------------------------------------===// 6193 // 6194 // Notice: any optimization or new instruction that go 6195 // into the code below should also be implemented in 6196 // the cost-model. 6197 // 6198 //===------------------------------------------------===// 6199 6200 // 2. Copy and widen instructions from the old loop into the new loop. 6201 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6202 VPlans.front()->execute(&State); 6203 6204 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6205 // predication, updating analyses. 6206 ILV.fixVectorizedLoop(); 6207 } 6208 6209 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6210 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6211 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6212 6213 // We create new control-flow for the vectorized loop, so the original 6214 // condition will be dead after vectorization if it's only used by the 6215 // branch. 6216 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6217 if (Cmp && Cmp->hasOneUse()) 6218 DeadInstructions.insert(Cmp); 6219 6220 // We create new "steps" for induction variable updates to which the original 6221 // induction variables map. An original update instruction will be dead if 6222 // all its users except the induction variable are dead. 6223 for (auto &Induction : *Legal->getInductionVars()) { 6224 PHINode *Ind = Induction.first; 6225 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6226 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6227 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6228 DeadInstructions.end(); 6229 })) 6230 DeadInstructions.insert(IndUpdate); 6231 6232 // We record as "Dead" also the type-casting instructions we had identified 6233 // during induction analysis. We don't need any handling for them in the 6234 // vectorized loop because we have proven that, under a proper runtime 6235 // test guarding the vectorized loop, the value of the phi, and the casted 6236 // value of the phi, are the same. The last instruction in this casting chain 6237 // will get its scalar/vector/widened def from the scalar/vector/widened def 6238 // of the respective phi node. Any other casts in the induction def-use chain 6239 // have no other uses outside the phi update chain, and will be ignored. 
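    // (Illustrative, hypothetical IR: an induction update whose value flows
    // through a trunc/sext pair that a runtime SCEV guard proves redundant;
    // those casts are the kind of instructions recorded as dead here.)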
6240 InductionDescriptor &IndDes = Induction.second; 6241 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6242 DeadInstructions.insert(Casts.begin(), Casts.end()); 6243 } 6244 } 6245 6246 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6247 6248 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6249 6250 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6251 Instruction::BinaryOps BinOp) { 6252 // When unrolling and the VF is 1, we only need to add a simple scalar. 6253 Type *Ty = Val->getType(); 6254 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6255 6256 if (Ty->isFloatingPointTy()) { 6257 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6258 6259 // Floating point operations had to be 'fast' to enable the unrolling. 6260 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6261 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6262 } 6263 Constant *C = ConstantInt::get(Ty, StartIdx); 6264 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6265 } 6266 6267 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6268 SmallVector<Metadata *, 4> MDs; 6269 // Reserve first location for self reference to the LoopID metadata node. 6270 MDs.push_back(nullptr); 6271 bool IsUnrollMetadata = false; 6272 MDNode *LoopID = L->getLoopID(); 6273 if (LoopID) { 6274 // First find existing loop unrolling disable metadata. 6275 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6276 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6277 if (MD) { 6278 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6279 IsUnrollMetadata = 6280 S && S->getString().startswith("llvm.loop.unroll.disable"); 6281 } 6282 MDs.push_back(LoopID->getOperand(i)); 6283 } 6284 } 6285 6286 if (!IsUnrollMetadata) { 6287 // Add runtime unroll disable metadata. 6288 LLVMContext &Context = L->getHeader()->getContext(); 6289 SmallVector<Metadata *, 1> DisableOperands; 6290 DisableOperands.push_back( 6291 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6292 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6293 MDs.push_back(DisableNode); 6294 MDNode *NewLoopID = MDNode::get(Context, MDs); 6295 // Set operand 0 to refer to the loop id itself. 6296 NewLoopID->replaceOperandWith(0, NewLoopID); 6297 L->setLoopID(NewLoopID); 6298 } 6299 } 6300 6301 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6302 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6303 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6304 bool PredicateAtRangeStart = Predicate(Range.Start); 6305 6306 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6307 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6308 Range.End = TmpVF; 6309 break; 6310 } 6311 6312 return PredicateAtRangeStart; 6313 } 6314 6315 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6316 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6317 /// of VF's starting at a given VF and extending it as much as possible. Each 6318 /// vectorization decision can potentially shorten this sub-range during 6319 /// buildVPlan(). 
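/// For example (illustrative values only), with \p MinVF = 2 and \p MaxVF = 16
/// the first buildVPlan() call might clamp its sub-range so the resulting plan
/// covers {2, 4}; the next loop iteration then resumes at VF = 8 and covers
/// {8, 16}.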
6320 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6321 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6322 VFRange SubRange = {VF, MaxVF + 1}; 6323 VPlans.push_back(buildVPlan(SubRange)); 6324 VF = SubRange.End; 6325 } 6326 } 6327 6328 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6329 VPlanPtr &Plan) { 6330 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6331 6332 // Look for cached value. 6333 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6334 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6335 if (ECEntryIt != EdgeMaskCache.end()) 6336 return ECEntryIt->second; 6337 6338 VPValue *SrcMask = createBlockInMask(Src, Plan); 6339 6340 // The terminator has to be a branch inst! 6341 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6342 assert(BI && "Unexpected terminator found"); 6343 6344 if (!BI->isConditional()) 6345 return EdgeMaskCache[Edge] = SrcMask; 6346 6347 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6348 assert(EdgeMask && "No Edge Mask found for condition"); 6349 6350 if (BI->getSuccessor(0) != Dst) 6351 EdgeMask = Builder.createNot(EdgeMask); 6352 6353 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6354 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6355 6356 return EdgeMaskCache[Edge] = EdgeMask; 6357 } 6358 6359 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6360 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6361 6362 // Look for cached value. 6363 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6364 if (BCEntryIt != BlockMaskCache.end()) 6365 return BCEntryIt->second; 6366 6367 // All-one mask is modelled as no-mask following the convention for masked 6368 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6369 VPValue *BlockMask = nullptr; 6370 6371 if (OrigLoop->getHeader() == BB) { 6372 if (!CM.blockNeedsPredication(BB)) 6373 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6374 6375 // Introduce the early-exit compare IV <= BTC to form header block mask. 6376 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6377 VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6378 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6379 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6380 return BlockMaskCache[BB] = BlockMask; 6381 } 6382 6383 // This is the block mask. We OR all incoming edges. 6384 for (auto *Predecessor : predecessors(BB)) { 6385 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6386 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6387 return BlockMaskCache[BB] = EdgeMask; 6388 6389 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6390 BlockMask = EdgeMask; 6391 continue; 6392 } 6393 6394 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6395 } 6396 6397 return BlockMaskCache[BB] = BlockMask; 6398 } 6399 6400 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I, 6401 VFRange &Range, 6402 VPlanPtr &Plan) { 6403 const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I); 6404 if (!IG) 6405 return nullptr; 6406 6407 // Now check if IG is relevant for VF's in the given range. 
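  // (The lambda below asks, for each candidate VF >= 2, whether the cost model
  // chose CM_Interleave for I; getDecisionAndClampRange then trims Range at the
  // first VF where that answer changes, so a single decision covers the whole
  // remaining sub-range.)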
6408 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 6409 return [=](unsigned VF) -> bool { 6410 return (VF >= 2 && // Query is illegal for VF == 1 6411 CM.getWideningDecision(I, VF) == 6412 LoopVectorizationCostModel::CM_Interleave); 6413 }; 6414 }; 6415 if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range)) 6416 return nullptr; 6417 6418 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 6419 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 6420 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 6421 assert(I == IG->getInsertPos() && 6422 "Generating a recipe for an adjunct member of an interleave group"); 6423 6424 VPValue *Mask = nullptr; 6425 if (Legal->isMaskRequired(I)) 6426 Mask = createBlockInMask(I->getParent(), Plan); 6427 6428 return new VPInterleaveRecipe(IG, Mask); 6429 } 6430 6431 VPWidenMemoryInstructionRecipe * 6432 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6433 VPlanPtr &Plan) { 6434 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 6435 return nullptr; 6436 6437 auto willWiden = [&](unsigned VF) -> bool { 6438 if (VF == 1) 6439 return false; 6440 if (CM.isScalarAfterVectorization(I, VF) || 6441 CM.isProfitableToScalarize(I, VF)) 6442 return false; 6443 LoopVectorizationCostModel::InstWidening Decision = 6444 CM.getWideningDecision(I, VF); 6445 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6446 "CM decision should be taken at this point."); 6447 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 6448 "Interleave memory opportunity should be caught earlier."); 6449 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6450 }; 6451 6452 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6453 return nullptr; 6454 6455 VPValue *Mask = nullptr; 6456 if (Legal->isMaskRequired(I)) 6457 Mask = createBlockInMask(I->getParent(), Plan); 6458 6459 return new VPWidenMemoryInstructionRecipe(*I, Mask); 6460 } 6461 6462 VPWidenIntOrFpInductionRecipe * 6463 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) { 6464 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 6465 // Check if this is an integer or fp induction. If so, build the recipe that 6466 // produces its scalar and vector values. 6467 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 6468 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6469 II.getKind() == InductionDescriptor::IK_FpInduction) 6470 return new VPWidenIntOrFpInductionRecipe(Phi); 6471 6472 return nullptr; 6473 } 6474 6475 // Optimize the special case where the source is a constant integer 6476 // induction variable. Notice that we can only optimize the 'trunc' case 6477 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6478 // (c) other casts depend on pointer size. 6479 6480 // Determine whether \p K is a truncation based on an induction variable that 6481 // can be optimized. 
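  // (Hypothetical illustrative IR of the pattern this targets:
  //    %iv    = phi i64 [ 0, %ph ], [ %iv.next, %latch ]   ; constant step
  //    %trunc = trunc i64 %iv to i32
  //  where the truncation folds into a narrower widened induction.)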
6482 auto isOptimizableIVTruncate = 6483 [&](Instruction *K) -> std::function<bool(unsigned)> { 6484 return 6485 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6486 }; 6487 6488 if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange( 6489 isOptimizableIVTruncate(I), Range)) 6490 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6491 cast<TruncInst>(I)); 6492 return nullptr; 6493 } 6494 6495 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) { 6496 PHINode *Phi = dyn_cast<PHINode>(I); 6497 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 6498 return nullptr; 6499 6500 // We know that all PHIs in non-header blocks are converted into selects, so 6501 // we don't have to worry about the insertion order and we can just use the 6502 // builder. At this point we generate the predication tree. There may be 6503 // duplications since this is a simple recursive scan, but future 6504 // optimizations will clean it up. 6505 6506 SmallVector<VPValue *, 2> Masks; 6507 unsigned NumIncoming = Phi->getNumIncomingValues(); 6508 for (unsigned In = 0; In < NumIncoming; In++) { 6509 VPValue *EdgeMask = 6510 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6511 assert((EdgeMask || NumIncoming == 1) && 6512 "Multiple predecessors with one having a full mask"); 6513 if (EdgeMask) 6514 Masks.push_back(EdgeMask); 6515 } 6516 return new VPBlendRecipe(Phi, Masks); 6517 } 6518 6519 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 6520 VFRange &Range) { 6521 6522 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6523 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6524 6525 if (IsPredicated) 6526 return false; 6527 6528 auto IsVectorizableOpcode = [](unsigned Opcode) { 6529 switch (Opcode) { 6530 case Instruction::Add: 6531 case Instruction::And: 6532 case Instruction::AShr: 6533 case Instruction::BitCast: 6534 case Instruction::Br: 6535 case Instruction::Call: 6536 case Instruction::FAdd: 6537 case Instruction::FCmp: 6538 case Instruction::FDiv: 6539 case Instruction::FMul: 6540 case Instruction::FPExt: 6541 case Instruction::FPToSI: 6542 case Instruction::FPToUI: 6543 case Instruction::FPTrunc: 6544 case Instruction::FRem: 6545 case Instruction::FSub: 6546 case Instruction::GetElementPtr: 6547 case Instruction::ICmp: 6548 case Instruction::IntToPtr: 6549 case Instruction::Load: 6550 case Instruction::LShr: 6551 case Instruction::Mul: 6552 case Instruction::Or: 6553 case Instruction::PHI: 6554 case Instruction::PtrToInt: 6555 case Instruction::SDiv: 6556 case Instruction::Select: 6557 case Instruction::SExt: 6558 case Instruction::Shl: 6559 case Instruction::SIToFP: 6560 case Instruction::SRem: 6561 case Instruction::Store: 6562 case Instruction::Sub: 6563 case Instruction::Trunc: 6564 case Instruction::UDiv: 6565 case Instruction::UIToFP: 6566 case Instruction::URem: 6567 case Instruction::Xor: 6568 case Instruction::ZExt: 6569 return true; 6570 } 6571 return false; 6572 }; 6573 6574 if (!IsVectorizableOpcode(I->getOpcode())) 6575 return false; 6576 6577 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6578 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6579 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6580 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6581 return false; 6582 } 6583 6584 auto willWiden = [&](unsigned VF) -> bool { 6585 if (!isa<PHINode>(I) && 
(CM.isScalarAfterVectorization(I, VF) || 6586 CM.isProfitableToScalarize(I, VF))) 6587 return false; 6588 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6589 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6590 // The following case may be scalarized depending on the VF. 6591 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6592 // version of the instruction. 6593 // Is it beneficial to perform intrinsic call compared to lib call? 6594 bool NeedToScalarize; 6595 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 6596 bool UseVectorIntrinsic = 6597 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 6598 return UseVectorIntrinsic || !NeedToScalarize; 6599 } 6600 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 6601 assert(CM.getWideningDecision(I, VF) == 6602 LoopVectorizationCostModel::CM_Scalarize && 6603 "Memory widening decisions should have been taken care by now"); 6604 return false; 6605 } 6606 return true; 6607 }; 6608 6609 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6610 return false; 6611 6612 // Success: widen this instruction. We optimize the common case where 6613 // consecutive instructions can be represented by a single recipe. 6614 if (!VPBB->empty()) { 6615 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 6616 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 6617 return true; 6618 } 6619 6620 VPBB->appendRecipe(new VPWidenRecipe(I)); 6621 return true; 6622 } 6623 6624 VPBasicBlock *VPRecipeBuilder::handleReplication( 6625 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 6626 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 6627 VPlanPtr &Plan) { 6628 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 6629 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 6630 Range); 6631 6632 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6633 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6634 6635 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 6636 6637 // Find if I uses a predicated instruction. If so, it will use its scalar 6638 // value. Avoid hoisting the insert-element which packs the scalar value into 6639 // a vector value, as that happens iff all users use the vector value. 6640 for (auto &Op : I->operands()) 6641 if (auto *PredInst = dyn_cast<Instruction>(Op)) 6642 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 6643 PredInst2Recipe[PredInst]->setAlsoPack(false); 6644 6645 // Finalize the recipe for Instr, first if it is not predicated. 6646 if (!IsPredicated) { 6647 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 6648 VPBB->appendRecipe(Recipe); 6649 return VPBB; 6650 } 6651 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 6652 assert(VPBB->getSuccessors().empty() && 6653 "VPBB has successors when handling predicated replication."); 6654 // Record predicated instructions for above packing optimizations. 
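  // (createReplicateRegion() below wraps Recipe in a triangular region: a
  // pred.<opcode>.entry block branches on the mask to pred.<opcode>.if, which
  // falls through to pred.<opcode>.continue; non-void results additionally get
  // a VPPredInstPHIRecipe in the continue block.)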
6655 PredInst2Recipe[I] = Recipe; 6656 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 6657 VPBlockUtils::insertBlockAfter(Region, VPBB); 6658 auto *RegSucc = new VPBasicBlock(); 6659 VPBlockUtils::insertBlockAfter(RegSucc, Region); 6660 return RegSucc; 6661 } 6662 6663 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 6664 VPRecipeBase *PredRecipe, 6665 VPlanPtr &Plan) { 6666 // Instructions marked for predication are replicated and placed under an 6667 // if-then construct to prevent side-effects. 6668 6669 // Generate recipes to compute the block mask for this region. 6670 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 6671 6672 // Build the triangular if-then region. 6673 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 6674 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 6675 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 6676 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 6677 auto *PHIRecipe = 6678 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 6679 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 6680 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 6681 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 6682 6683 // Note: first set Entry as region entry and then connect successors starting 6684 // from it in order, to propagate the "parent" of each VPBasicBlock. 6685 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 6686 VPBlockUtils::connectBlocks(Pred, Exit); 6687 6688 return Region; 6689 } 6690 6691 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range, 6692 VPlanPtr &Plan, VPBasicBlock *VPBB) { 6693 VPRecipeBase *Recipe = nullptr; 6694 // Check if Instr should belong to an interleave memory recipe, or already 6695 // does. In the latter case Instr is irrelevant. 6696 if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) { 6697 VPBB->appendRecipe(Recipe); 6698 return true; 6699 } 6700 6701 // Check if Instr is a memory operation that should be widened. 6702 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 6703 VPBB->appendRecipe(Recipe); 6704 return true; 6705 } 6706 6707 // Check if Instr should form some PHI recipe. 6708 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 6709 VPBB->appendRecipe(Recipe); 6710 return true; 6711 } 6712 if ((Recipe = tryToBlend(Instr, Plan))) { 6713 VPBB->appendRecipe(Recipe); 6714 return true; 6715 } 6716 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 6717 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 6718 return true; 6719 } 6720 6721 // Check if Instr is to be widened by a general VPWidenRecipe, after 6722 // having first checked for specific widening recipes that deal with 6723 // Interleave Groups, Inductions and Phi nodes. 6724 if (tryToWiden(Instr, VPBB, Range)) 6725 return true; 6726 6727 return false; 6728 } 6729 6730 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF, 6731 unsigned MaxVF) { 6732 assert(OrigLoop->empty() && "Inner loop expected."); 6733 6734 // Collect conditions feeding internal conditional branches; they need to be 6735 // represented in VPlan for it to model masking. 
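  // (For example, given a hypothetical if-converted body terminated by
  //    br i1 %cmp, label %then, label %else
  //  the condition %cmp needs a VPValue def so that createEdgeMask() can look
  //  it up via Plan->getVPValue() when forming edge masks.)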
6736 SmallPtrSet<Value *, 1> NeedDef; 6737 6738 auto *Latch = OrigLoop->getLoopLatch(); 6739 for (BasicBlock *BB : OrigLoop->blocks()) { 6740 if (BB == Latch) 6741 continue; 6742 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 6743 if (Branch && Branch->isConditional()) 6744 NeedDef.insert(Branch->getCondition()); 6745 } 6746 6747 // If the tail is to be folded by masking, the primary induction variable 6748 // needs to be represented in VPlan for it to model early-exit masking. 6749 if (CM.foldTailByMasking()) 6750 NeedDef.insert(Legal->getPrimaryInduction()); 6751 6752 // Collect instructions from the original loop that will become trivially dead 6753 // in the vectorized loop. We don't need to vectorize these instructions. For 6754 // example, original induction update instructions can become dead because we 6755 // separately emit induction "steps" when generating code for the new loop. 6756 // Similarly, we create a new latch condition when setting up the structure 6757 // of the new loop, so the old one can become dead. 6758 SmallPtrSet<Instruction *, 4> DeadInstructions; 6759 collectTriviallyDeadInstructions(DeadInstructions); 6760 6761 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6762 VFRange SubRange = {VF, MaxVF + 1}; 6763 VPlans.push_back( 6764 buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions)); 6765 VF = SubRange.End; 6766 } 6767 } 6768 6769 LoopVectorizationPlanner::VPlanPtr 6770 LoopVectorizationPlanner::buildVPlanWithVPRecipes( 6771 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef, 6772 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6773 // Hold a mapping from predicated instructions to their recipes, in order to 6774 // fix their AlsoPack behavior if a user is determined to replicate and use a 6775 // scalar instead of vector value. 6776 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 6777 6778 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 6779 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 6780 6781 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 6782 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 6783 auto Plan = llvm::make_unique<VPlan>(VPBB); 6784 6785 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, TTI, Legal, CM, Builder); 6786 // Represent values that will have defs inside VPlan. 6787 for (Value *V : NeedDef) 6788 Plan->addVPValue(V); 6789 6790 // Scan the body of the loop in a topological order to visit each basic block 6791 // after having visited its predecessor basic blocks. 6792 LoopBlocksDFS DFS(OrigLoop); 6793 DFS.perform(LI); 6794 6795 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6796 // Relevant instructions from basic block BB will be grouped into VPRecipe 6797 // ingredients and fill a new VPBasicBlock. 6798 unsigned VPBBsForBB = 0; 6799 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 6800 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 6801 VPBB = FirstVPBBForBB; 6802 Builder.setInsertPoint(VPBB); 6803 6804 std::vector<Instruction *> Ingredients; 6805 6806 // Organize the ingredients to vectorize from current basic block in the 6807 // right order. 6808 for (Instruction &I : BB->instructionsWithoutDebug()) { 6809 Instruction *Instr = &I; 6810 6811 // First filter out irrelevant instructions, to ensure no recipes are 6812 // built for them. 6813 if (isa<BranchInst>(Instr) || 6814 DeadInstructions.find(Instr) != DeadInstructions.end()) 6815 continue; 6816 6817 // I is a member of an InterleaveGroup for Range.Start. 
If it's an adjunct 6818 // member of the IG, do not construct any Recipe for it. 6819 const InterleaveGroup<Instruction> *IG = 6820 CM.getInterleavedAccessGroup(Instr); 6821 if (IG && Instr != IG->getInsertPos() && 6822 Range.Start >= 2 && // Query is illegal for VF == 1 6823 CM.getWideningDecision(Instr, Range.Start) == 6824 LoopVectorizationCostModel::CM_Interleave) { 6825 auto SinkCandidate = SinkAfterInverse.find(Instr); 6826 if (SinkCandidate != SinkAfterInverse.end()) 6827 Ingredients.push_back(SinkCandidate->second); 6828 continue; 6829 } 6830 6831 // Move instructions to handle first-order recurrences, step 1: avoid 6832 // handling this instruction until after we've handled the instruction it 6833 // should follow. 6834 auto SAIt = SinkAfter.find(Instr); 6835 if (SAIt != SinkAfter.end()) { 6836 LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" 6837 << *SAIt->second 6838 << " to vectorize a 1st order recurrence.\n"); 6839 SinkAfterInverse[SAIt->second] = Instr; 6840 continue; 6841 } 6842 6843 Ingredients.push_back(Instr); 6844 6845 // Move instructions to handle first-order recurrences, step 2: push the 6846 // instruction to be sunk at its insertion point. 6847 auto SAInvIt = SinkAfterInverse.find(Instr); 6848 if (SAInvIt != SinkAfterInverse.end()) 6849 Ingredients.push_back(SAInvIt->second); 6850 } 6851 6852 // Introduce each ingredient into VPlan. 6853 for (Instruction *Instr : Ingredients) { 6854 if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB)) 6855 continue; 6856 6857 // Otherwise, if all widening options failed, Instruction is to be 6858 // replicated. This may create a successor for VPBB. 6859 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication( 6860 Instr, Range, VPBB, PredInst2Recipe, Plan); 6861 if (NextVPBB != VPBB) { 6862 VPBB = NextVPBB; 6863 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 6864 : ""); 6865 } 6866 } 6867 } 6868 6869 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 6870 // may also be empty, such as the last one VPBB, reflecting original 6871 // basic-blocks with no recipes. 6872 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 6873 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 6874 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 6875 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 6876 delete PreEntry; 6877 6878 std::string PlanName; 6879 raw_string_ostream RSO(PlanName); 6880 unsigned VF = Range.Start; 6881 Plan->addVF(VF); 6882 RSO << "Initial VPlan for VF={" << VF; 6883 for (VF *= 2; VF < Range.End; VF *= 2) { 6884 Plan->addVF(VF); 6885 RSO << "," << VF; 6886 } 6887 RSO << "},UF>=1"; 6888 RSO.flush(); 6889 Plan->setName(PlanName); 6890 6891 return Plan; 6892 } 6893 6894 LoopVectorizationPlanner::VPlanPtr 6895 LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 6896 // Outer loop handling: They may require CFG and instruction level 6897 // transformations before even evaluating whether vectorization is profitable. 6898 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6899 // the vectorization pipeline. 
6900 assert(!OrigLoop->empty()); 6901 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6902 6903 // Create new empty VPlan 6904 auto Plan = llvm::make_unique<VPlan>(); 6905 6906 // Build hierarchical CFG 6907 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 6908 HCFGBuilder.buildHierarchicalCFG(); 6909 6910 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 6911 Plan->addVF(VF); 6912 6913 if (EnableVPlanPredication) { 6914 VPlanPredicator VPP(*Plan); 6915 VPP.predicate(); 6916 6917 // Avoid running transformation to recipes until masked code generation in 6918 // VPlan-native path is in place. 6919 return Plan; 6920 } 6921 6922 SmallPtrSet<Instruction *, 1> DeadInstructions; 6923 VPlanHCFGTransforms::VPInstructionsToVPRecipes( 6924 Plan, Legal->getInductionVars(), DeadInstructions); 6925 6926 return Plan; 6927 } 6928 6929 Value* LoopVectorizationPlanner::VPCallbackILV:: 6930 getOrCreateVectorValues(Value *V, unsigned Part) { 6931 return ILV.getOrCreateVectorValue(V, Part); 6932 } 6933 6934 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 6935 O << " +\n" 6936 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 6937 IG->getInsertPos()->printAsOperand(O, false); 6938 if (User) { 6939 O << ", "; 6940 User->getOperand(0)->printAsOperand(O); 6941 } 6942 O << "\\l\""; 6943 for (unsigned i = 0; i < IG->getFactor(); ++i) 6944 if (Instruction *I = IG->getMember(i)) 6945 O << " +\n" 6946 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 6947 } 6948 6949 void VPWidenRecipe::execute(VPTransformState &State) { 6950 for (auto &Instr : make_range(Begin, End)) 6951 State.ILV->widenInstruction(Instr); 6952 } 6953 6954 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 6955 assert(!State.Instance && "Int or FP induction being replicated."); 6956 State.ILV->widenIntOrFpInduction(IV, Trunc); 6957 } 6958 6959 void VPWidenPHIRecipe::execute(VPTransformState &State) { 6960 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 6961 } 6962 6963 void VPBlendRecipe::execute(VPTransformState &State) { 6964 State.ILV->setDebugLocFromInst(State.Builder, Phi); 6965 // We know that all PHIs in non-header blocks are converted into 6966 // selects, so we don't have to worry about the insertion order and we 6967 // can just use the builder. 6968 // At this point we generate the predication tree. There may be 6969 // duplications since this is a simple recursive scan, but future 6970 // optimizations will clean it up. 6971 6972 unsigned NumIncoming = Phi->getNumIncomingValues(); 6973 6974 assert((User || NumIncoming == 1) && 6975 "Multiple predecessors with predecessors having a full mask"); 6976 // Generate a sequence of selects of the form: 6977 // SELECT(Mask3, In3, 6978 // SELECT(Mask2, In2, 6979 // ( ...))) 6980 InnerLoopVectorizer::VectorParts Entry(State.UF); 6981 for (unsigned In = 0; In < NumIncoming; ++In) { 6982 for (unsigned Part = 0; Part < State.UF; ++Part) { 6983 // We might have single edge PHIs (blocks) - use an identity 6984 // 'select' for the first PHI operand. 6985 Value *In0 = 6986 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 6987 if (In == 0) 6988 Entry[Part] = In0; // Initialize with the first incoming value. 6989 else { 6990 // Select between the current value and the previous incoming edge 6991 // based on the incoming mask. 
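        // Illustrative sketch (an assumption, not taken from the surrounding
        // code): with VF = 4 and two incoming values, the select created
        // below for each unroll part looks roughly like
        //   %predphi = select <4 x i1> %mask.1, <4 x i32> %in.1, <4 x i32> %in.0
        // where %mask.1 is the mask of the second incoming edge.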
        Value *Cond = State.get(User->getOperand(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  if (!User)
    return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues);
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert the scalar instance, packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  if (!User) // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();
  else {
    VPValue *BlockInMask = User->getOperand(0);
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  }

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
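  // Illustrative sketch (hypothetical value names): for Part = 0 and Lane = 2
  // the code above yields roughly
  //   %cond.bit = extractelement <4 x i1> %block.mask, i32 2
  // and the unreachable terminator of the current block is rewritten below to
  //   br i1 %cond.bit, label <to-be-set>, label <to-be-set>
  // with both successors filled in once the target blocks are created.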
7059 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 7060 assert(isa<UnreachableInst>(CurrentTerminator) && 7061 "Expected to replace unreachable terminator with conditional branch."); 7062 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 7063 CondBr->setSuccessor(0, nullptr); 7064 ReplaceInstWithInst(CurrentTerminator, CondBr); 7065 } 7066 7067 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 7068 assert(State.Instance && "Predicated instruction PHI works per instance."); 7069 Instruction *ScalarPredInst = cast<Instruction>( 7070 State.ValueMap.getScalarValue(PredInst, *State.Instance)); 7071 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 7072 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 7073 assert(PredicatingBB && "Predicated block has no single predecessor."); 7074 7075 // By current pack/unpack logic we need to generate only a single phi node: if 7076 // a vector value for the predicated instruction exists at this point it means 7077 // the instruction has vector users only, and a phi for the vector value is 7078 // needed. In this case the recipe of the predicated instruction is marked to 7079 // also do that packing, thereby "hoisting" the insert-element sequence. 7080 // Otherwise, a phi node for the scalar value is needed. 7081 unsigned Part = State.Instance->Part; 7082 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 7083 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 7084 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 7085 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 7086 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 7087 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 7088 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 7089 } else { 7090 Type *PredInstType = PredInst->getType(); 7091 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 7092 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 7093 Phi->addIncoming(ScalarPredInst, PredicatedBB); 7094 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 7095 } 7096 } 7097 7098 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 7099 if (!User) 7100 return State.ILV->vectorizeMemoryInstruction(&Instr); 7101 7102 // Last (and currently only) operand is a mask. 7103 InnerLoopVectorizer::VectorParts MaskValues(State.UF); 7104 VPValue *Mask = User->getOperand(User->getNumOperands() - 1); 7105 for (unsigned Part = 0; Part < State.UF; ++Part) 7106 MaskValues[Part] = State.get(Mask, Part); 7107 State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues); 7108 } 7109 7110 // Process the loop in the VPlan-native vectorization path. This path builds 7111 // VPlan upfront in the vectorization pipeline, which allows to apply 7112 // VPlan-to-VPlan transformations from the very beginning without modifying the 7113 // input LLVM IR. 
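// For example (illustrative invocation; the exact option spellings live with
// the cl::opt declarations earlier in this file), the path below is exercised
// with something like:
//   opt -loop-vectorize -enable-vplan-native-path -S input.ll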
7114 static bool processLoopInVPlanNativePath( 7115 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 7116 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 7117 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 7118 OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) { 7119 7120 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 7121 Function *F = L->getHeader()->getParent(); 7122 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 7123 LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 7124 &Hints, IAI); 7125 // Use the planner for outer loop vectorization. 7126 // TODO: CM is not used at this point inside the planner. Turn CM into an 7127 // optional argument if we don't need it in the future. 7128 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM); 7129 7130 // Get user vectorization factor. 7131 unsigned UserVF = Hints.getWidth(); 7132 7133 // Check the function attributes to find out if this function should be 7134 // optimized for size. 7135 bool OptForSize = 7136 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 7137 7138 // Plan how to best vectorize, return the best VF and its cost. 7139 VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF); 7140 7141 // If we are stress testing VPlan builds, do not attempt to generate vector 7142 // code. Masked vector code generation support will follow soon. 7143 if (VPlanBuildStressTest || EnableVPlanPredication) 7144 return false; 7145 7146 LVP.setBestPlan(VF.Width, 1); 7147 7148 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, UserVF, 1, LVL, 7149 &CM); 7150 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 7151 << L->getHeader()->getParent()->getName() << "\"\n"); 7152 LVP.executePlan(LB, DT); 7153 7154 // Mark the loop as already vectorized to avoid vectorizing again. 7155 Hints.setAlreadyVectorized(); 7156 7157 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7158 return true; 7159 } 7160 7161 bool LoopVectorizePass::processLoop(Loop *L) { 7162 assert((EnableVPlanNativePath || L->empty()) && 7163 "VPlan-native path is not enabled. Only process inner loops."); 7164 7165 #ifndef NDEBUG 7166 const std::string DebugLocStr = getDebugLocString(L); 7167 #endif /* NDEBUG */ 7168 7169 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 7170 << L->getHeader()->getParent()->getName() << "\" from " 7171 << DebugLocStr << "\n"); 7172 7173 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 7174 7175 LLVM_DEBUG( 7176 dbgs() << "LV: Loop hints:" 7177 << " force=" 7178 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 7179 ? "disabled" 7180 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 7181 ? "enabled" 7182 : "?")) 7183 << " width=" << Hints.getWidth() 7184 << " unroll=" << Hints.getInterleave() << "\n"); 7185 7186 // Function containing loop 7187 Function *F = L->getHeader()->getParent(); 7188 7189 // Looking at the diagnostic output is the only way to determine if a loop 7190 // was vectorized (other than looking at the IR or machine code), so it 7191 // is important to generate an optimization remark for each loop. Most of 7192 // these messages are generated as OptimizationRemarkAnalysis. Remarks 7193 // generated as OptimizationRemark and OptimizationRemarkMissed are 7194 // less verbose reporting vectorized loops and unvectorized loops that may 7195 // benefit from vectorization, respectively. 
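  // For example (illustrative, assuming a Clang driver invocation), the
  // remarks emitted below can be surfaced with:
  //   clang -O2 -Rpass=loop-vectorize -Rpass-missed=loop-vectorize \
  //       -Rpass-analysis=loop-vectorize file.c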
  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot
  // modify the incoming IR, we need to build VPlan upfront in the
  // vectorization pipeline.
  if (!L->empty())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, Hints);

  assert(L->empty() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  // Prefer a constant trip count, then profile data, then the upper-bound
  // estimate.
  unsigned ExpectedTC = 0;
  bool HasExpectedTC = false;
  if (const SCEVConstant *ConstExits =
          dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
    const APInt &ExitsCount = ConstExits->getAPInt();
    // We are interested in small values for ExpectedTC. Skip over those that
    // can't fit in an unsigned.
    if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
      ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
      HasExpectedTC = true;
    }
  }
  // ExpectedTC may be large because it is bounded by a variable. Check
  // profiling information to validate that we should vectorize.
  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      // Loops with a very small trip count are considered for vectorization
      // under OptForSize, thereby making sure the cost of their loop body is
      // dominant, free of runtime guards and scalar iteration overheads.
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
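  // Illustrative note (an assumption, not part of the original code): the
  // NoImplicitFloat attribute checked below is spelled "noimplicitfloat" in
  // the IR and typically originates from front-end options such as Clang's
  // -mno-implicit-float, e.g.
  //   define void @f(i32* %p) #0 { ... }
  //   attributes #0 = { noimplicitfloat }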
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                         "attribute is used.\n");
    ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    LLVM_DEBUG(
        dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get the user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get the user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is profitable to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of a scalar loop when there
    // are no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
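    // Illustrative sketch (the exact metadata shape is an assumption): the
    // scalar remainder loop ends up with loop metadata along the lines of
    //   br i1 %cmp, label %body, label %exit, !llvm.loop !0
    //   !0 = distinct !{!0, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}
    // added further down via AddRuntimeUnrollDisableMetaData, so the unroller
    // will not runtime-unroll the rarely taken scalar loop.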
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing it again.
    Hints.setAlreadyVectorized();
  }

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt vectorization if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  // All loop nests in the function have been processed.
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for the VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
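// Usage note (illustrative, not part of the original code): with the new pass
// manager the pass defined above can be exercised directly, e.g.
//   opt -passes=loop-vectorize -S input.ll
// while the legacy pass manager reaches it through -loop-vectorize.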