//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanHCFGTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));
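
// For example, a test can pin the cost model to target-independent values
// with the flags above (hypothetical invocation, for illustration only):
//   opt -loop-vectorize -force-target-instruction-cost=1 \
//       -force-target-max-vector-interleave=1 -S < input.ll
// so that the reported costs do not vary across targets.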
Mostly " 216 "useful for getting consistent testing.")); 217 218 static cl::opt<unsigned> SmallLoopCost( 219 "small-loop-cost", cl::init(20), cl::Hidden, 220 cl::desc( 221 "The cost of a loop that is considered 'small' by the interleaver.")); 222 223 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 224 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 225 cl::desc("Enable the use of the block frequency analysis to access PGO " 226 "heuristics minimizing code growth in cold regions and being more " 227 "aggressive in hot regions.")); 228 229 // Runtime interleave loops for load/store throughput. 230 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 231 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 232 cl::desc( 233 "Enable runtime interleaving until load/store ports are saturated")); 234 235 /// The number of stores in a loop that are allowed to need predication. 236 static cl::opt<unsigned> NumberOfStoresToPredicate( 237 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 238 cl::desc("Max number of stores to be predicated behind an if.")); 239 240 static cl::opt<bool> EnableIndVarRegisterHeur( 241 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 242 cl::desc("Count the induction variable only once when interleaving")); 243 244 static cl::opt<bool> EnableCondStoresVectorization( 245 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 246 cl::desc("Enable if predication of stores during vectorization.")); 247 248 static cl::opt<unsigned> MaxNestedScalarReductionIC( 249 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 250 cl::desc("The maximum interleave count to use when interleaving a scalar " 251 "reduction in a nested loop.")); 252 253 cl::opt<bool> EnableVPlanNativePath( 254 "enable-vplan-native-path", cl::init(false), cl::Hidden, 255 cl::desc("Enable VPlan-native vectorization path with " 256 "support for outer loop vectorization.")); 257 258 // This flag enables the stress testing of the VPlan H-CFG construction in the 259 // VPlan-native vectorization path. It must be used in conjuction with 260 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the 261 // verification of the H-CFGs built. 262 static cl::opt<bool> VPlanBuildStressTest( 263 "vplan-build-stress-test", cl::init(false), cl::Hidden, 264 cl::desc( 265 "Build VPlan for every supported loop nest in the function and bail " 266 "out right after the build (stress test the VPlan H-CFG construction " 267 "in the VPlan-native vectorization path).")); 268 269 /// A helper function for converting Scalar types to vector types. 270 /// If the incoming type is void, we return void. If the VF is 1, we return 271 /// the scalar type. 272 static Type *ToVectorTy(Type *Scalar, unsigned VF) { 273 if (Scalar->isVoidTy() || VF == 1) 274 return Scalar; 275 return VectorType::get(Scalar, VF); 276 } 277 278 /// A helper function that returns the type of loaded or stored value. 279 static Type *getMemInstValueType(Value *I) { 280 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 281 "Expected Load or Store instruction"); 282 if (auto *LI = dyn_cast<LoadInst>(I)) 283 return LI->getType(); 284 return cast<StoreInst>(I)->getValueOperand()->getType(); 285 } 286 287 /// A helper function that returns true if the given type is irregular. The 288 /// type is irregular if its allocated size doesn't equal the store size of an 289 /// element of the corresponding vector type at the given vectorization factor. 

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
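
// For illustration, under a typical data layout: i1 has an allocation size of
// 8 bits but a type size of 1 bit, so it is irregular at VF == 1. At VF == 8,
// 8 elements * 1 byte of allocation != the 1-byte store size of <8 x i1>, so
// it is irregular there too. A plain i32 is regular in both cases.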

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);
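
  // For illustration: if %x was scalarized at VF == 4 and a vector use is
  // later encountered, getOrCreateVectorValue packs the four scalar instances
  // with an insertelement sequence, roughly:
  //   %v.0 = insertelement <4 x i32> undef, i32 %x.0, i32 0
  //   %v.1 = insertelement <4 x i32> %v.0,  i32 %x.1, i32 1
  //   ...
  // and caches the result in VectorLoopValueMap for subsequent uses.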

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to,
  /// optionally masking the vector operations if \p BlockInMask is non-null.
  void vectorizeInterleaveGroup(Instruction *Instr,
                                VectorParts *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop into the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);
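
  // For illustration: together with getStepVector below, this is how a widened
  // induction variable starts. Broadcasting %start at VF == 4 and adding a
  // unit-stride step vector yields, roughly:
  //   %bc  = shufflevector <4 x i64> %ins, <4 x i64> undef, zeroinitializer
  //   %ind = add <4 x i64> %bc, <i64 0, i64 1, i64 2, i64 3>
  // where %ins holds %start inserted into lane 0.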

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);
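
  // Note: as documented on the VectorTripCount member below, the widened trip
  // count is the scalar trip count rounded down to a multiple of the widened
  // step, i.e. VectorTripCount = TripCount - (TripCount % (VF * UF)); the
  // remainder iterations run in the scalar epilogue loop.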

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                       Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};
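
// Note: InnerLoopUnroller drives the same machinery as InnerLoopVectorizer
// with VF == 1, so "widening" degenerates to emitting UF scalar copies of
// each instruction; the overridden hooks return scalar values rather than
// building vectors.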

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      auto NewDIL = DIL->cloneWithDuplicationFactor(UF * VF);
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization should be avoided up front.
  Optional<unsigned> computeMaxVF(bool OptForSize);

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost. This function takes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. This decision map is used for building the lists of loop-uniform
  /// and loop-scalar instructions. The calculated cost is saved with the
  /// widening decision in order to avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;

    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
  };

  /// \return Information about the register usage of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }
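
  // For illustration: a load of a[i] (stride +1) is typically assigned
  // CM_Widen; a load of a[N-i] (stride -1), CM_Widen_Reverse; and a strided
  // access such as a[3*i], CM_GatherScatter or CM_Scalarize, whichever the
  // target's costs favor.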

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return true if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
  }

  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
  }
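
  // For illustration: a conditional update "if (c[i]) a[i] = x;" over a
  // consecutive a[] can remain a vector masked store when isLegalMaskedStore
  // returns true; otherwise the store must be scalarized and predicated.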

  /// Returns true if the target machine supports a masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI.isLegalMaskedScatter(DataType);
  }

  /// Returns true if the target machine supports a masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI.isLegalMaskedGather(DataType);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I, unsigned VF = 1);

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps, and there is nothing preventing us from
  /// creating a scalar epilogue.
  bool requiresScalarEpilogue() const {
    return IsScalarEpilogueAllowed && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize.
  bool isScalarEpilogueAllowed() const { return IsScalarEpilogueAllowed; }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load/Store instruction \p I with uniform pointer -
  /// Load: scalar load + broadcast.
  /// Store: scalar store + (loop invariant value stored? 0 : extract of last
  /// element)
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;
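
  // For illustration: if DemandedBits proves that only the low 8 bits of an
  // i32 computation are used, MinBWs maps it to 8 and the vectorized code can
  // operate on <VF x i8> instead of <VF x i32> (see
  // truncateToMinimalBitwidths), fitting more lanes per register.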

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// Records whether it is allowed to have the original scalar loop execute at
  /// least once. This may be needed as a fallback loop in case runtime
  /// aliasing/dependence checks fail, or to handle the tail/remainder
  /// iterations when the trip count is unknown or doesn't divide by the VF,
  /// or as a peel-loop to handle gaps in interleave-groups.
  /// Under optsize and when the trip count is very small we don't allow any
  /// iterations to execute in the scalar loop.
  bool IsScalarEpilogueAllowed = true;

  /// All blocks of loop are to be masked to fold tail of scalar iterations.
  bool FoldTailByMasking = false;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              unsigned VF);

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(unsigned VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(unsigned VF);
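
  // For illustration: in "a[i] = b[i] + 1" at VF == 4, the pointers feeding
  // the wide load and store are uniform (a single scalar address per unrolled
  // part), whereas the addresses of a scalarized gather would be scalar but
  // not uniform (one per lane).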

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
                                std::pair<InstWidening, unsigned>>;

  DecisionList WideningDecisions;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// Vectorization legality.
  LoopVectorizationLegality *Legal;

  /// Vector target information.
  const TargetTransformInfo &TTI;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Demanded bits analysis.
  DemandedBits *DB;

  /// Assumption cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;

  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;

  /// The interleave access information contains groups of interleaved accesses
  /// with the same stride and close to each other.
  InterleavedAccessInfo &InterleaveInfo;

  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;

  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

} // end namespace llvm

// Return true if \p OuterLp is an outer loop annotated with hints for explicit
// vectorization. The loop needs to be annotated with #pragma omp simd
// simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
// the vector length information is not provided, vectorization is not
// considered explicit. Interleave hints are not allowed either. These
// limitations will be relaxed in the future.
// Please note that we are currently forced to abuse the pragma 'clang
// vectorize' semantics. This pragma provides *auto-vectorization hints*
// (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
// provides *explicit vectorization hints* (LV can bypass legal checks and
// assume that vectorization is legal). However, both hints are implemented
// using the same metadata (llvm.loop.vectorize, processed by
// LoopVectorizeHints). This will be fixed in the future when the native IR
// representation for pragma 'omp simd' is introduced.
static bool isExplicitVecOuterLoop(Loop *OuterLp,
                                   OptimizationRemarkEmitter *ORE) {
  assert(!OuterLp->empty() && "This is not an outer loop");
  LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);

  // Only outer loops with an explicit vectorization hint are supported.
  // Unannotated outer loops are ignored.
  if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
    return false;

  Function *Fn = OuterLp->getHeader()->getParent();
  if (!Hints.allowVectorization(Fn, OuterLp,
                                true /*VectorizeOnlyWhenForced*/)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
    return false;
  }

  if (!Hints.getWidth()) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No user vector width.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (Hints.getInterleave() > 1) {
    // TODO: Interleave support is future work.
1382 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1383 "outer loops.\n"); 1384 Hints.emitRemarkWithHints(); 1385 return false; 1386 } 1387 1388 return true; 1389 } 1390 1391 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1392 OptimizationRemarkEmitter *ORE, 1393 SmallVectorImpl<Loop *> &V) { 1394 // Collect inner loops and outer loops without irreducible control flow. For 1395 // now, only collect outer loops that have explicit vectorization hints. If we 1396 // are stress testing the VPlan H-CFG construction, we collect the outermost 1397 // loop of every loop nest. 1398 if (L.empty() || VPlanBuildStressTest || 1399 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1400 LoopBlocksRPO RPOT(&L); 1401 RPOT.perform(LI); 1402 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1403 V.push_back(&L); 1404 // TODO: Collect inner loops inside marked outer loops in case 1405 // vectorization fails for the outer loop. Do not invoke 1406 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1407 // already known to be reducible. We can use an inherited attribute for 1408 // that. 1409 return; 1410 } 1411 } 1412 for (Loop *InnerL : L) 1413 collectSupportedLoops(*InnerL, LI, ORE, V); 1414 } 1415 1416 namespace { 1417 1418 /// The LoopVectorize Pass. 1419 struct LoopVectorize : public FunctionPass { 1420 /// Pass identification, replacement for typeid 1421 static char ID; 1422 1423 LoopVectorizePass Impl; 1424 1425 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1426 bool VectorizeOnlyWhenForced = false) 1427 : FunctionPass(ID) { 1428 Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced; 1429 Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced; 1430 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1431 } 1432 1433 bool runOnFunction(Function &F) override { 1434 if (skipFunction(F)) 1435 return false; 1436 1437 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1438 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1439 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1440 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1441 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1442 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1443 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 1444 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1445 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1446 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1447 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1448 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1449 1450 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1451 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1452 1453 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1454 GetLAA, *ORE); 1455 } 1456 1457 void getAnalysisUsage(AnalysisUsage &AU) const override { 1458 AU.addRequired<AssumptionCacheTracker>(); 1459 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1460 AU.addRequired<DominatorTreeWrapperPass>(); 1461 AU.addRequired<LoopInfoWrapperPass>(); 1462 AU.addRequired<ScalarEvolutionWrapperPass>(); 1463 AU.addRequired<TargetTransformInfoWrapperPass>(); 1464 AU.addRequired<AAResultsWrapperPass>(); 1465 AU.addRequired<LoopAccessLegacyAnalysis>(); 1466 AU.addRequired<DemandedBitsWrapperPass>(); 1467 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1468 1469 // We currently do not preserve loopinfo/dominator analyses with outer loop 1470 // vectorization. Until this is addressed, mark these analyses as preserved 1471 // only for non-VPlan-native path. 1472 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 1473 if (!EnableVPlanNativePath) { 1474 AU.addPreserved<LoopInfoWrapperPass>(); 1475 AU.addPreserved<DominatorTreeWrapperPass>(); 1476 } 1477 1478 AU.addPreserved<BasicAAWrapperPass>(); 1479 AU.addPreserved<GlobalsAAWrapperPass>(); 1480 } 1481 }; 1482 1483 } // end anonymous namespace 1484 1485 //===----------------------------------------------------------------------===// 1486 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1487 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1488 //===----------------------------------------------------------------------===// 1489 1490 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1491 // We need to place the broadcast of invariant variables outside the loop, 1492 // but only if it's proven safe to do so. Else, broadcast will be inside 1493 // vector loop body. 1494 Instruction *Instr = dyn_cast<Instruction>(V); 1495 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1496 (!Instr || 1497 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1498 // Place the code for broadcasting invariant variables in the new preheader. 1499 IRBuilder<>::InsertPointGuard Guard(Builder); 1500 if (SafeToHoist) 1501 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1502 1503 // Broadcast the scalar into all locations in the vector. 
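// For illustration (assuming VF = 4 and an i32 invariant %v), CreateVectorSplat
// emits roughly:
//   %ins   = insertelement <4 x i32> undef, i32 %v, i32 0
//   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
// i.e. a single insertelement followed by a shufflevector with an all-zero mask.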
1504 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1505 1506 return Shuf; 1507 } 1508 1509 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1510 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1511 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1512 "Expected either an induction phi-node or a truncate of it!"); 1513 Value *Start = II.getStartValue(); 1514 1515 // Construct the initial value of the vector IV in the vector loop preheader 1516 auto CurrIP = Builder.saveIP(); 1517 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1518 if (isa<TruncInst>(EntryVal)) { 1519 assert(Start->getType()->isIntegerTy() && 1520 "Truncation requires an integer type"); 1521 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1522 Step = Builder.CreateTrunc(Step, TruncType); 1523 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1524 } 1525 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1526 Value *SteppedStart = 1527 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1528 1529 // We create vector phi nodes for both integer and floating-point induction 1530 // variables. Here, we determine the kind of arithmetic we will perform. 1531 Instruction::BinaryOps AddOp; 1532 Instruction::BinaryOps MulOp; 1533 if (Step->getType()->isIntegerTy()) { 1534 AddOp = Instruction::Add; 1535 MulOp = Instruction::Mul; 1536 } else { 1537 AddOp = II.getInductionOpcode(); 1538 MulOp = Instruction::FMul; 1539 } 1540 1541 // Multiply the vectorization factor by the step using integer or 1542 // floating-point arithmetic as appropriate. 1543 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1544 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1545 1546 // Create a vector splat to use in the induction update. 1547 // 1548 // FIXME: If the step is non-constant, we create the vector splat with 1549 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1550 // handle a constant vector splat. 1551 Value *SplatVF = isa<Constant>(Mul) 1552 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 1553 : Builder.CreateVectorSplat(VF, Mul); 1554 Builder.restoreIP(CurrIP); 1555 1556 // We may need to add the step a number of times, depending on the unroll 1557 // factor. The last of those goes into the PHI. 1558 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1559 &*LoopVectorBody->getFirstInsertionPt()); 1560 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1561 Instruction *LastInduction = VecInd; 1562 for (unsigned Part = 0; Part < UF; ++Part) { 1563 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1564 1565 if (isa<TruncInst>(EntryVal)) 1566 addMetadata(LastInduction, EntryVal); 1567 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1568 1569 LastInduction = cast<Instruction>(addFastMathFlag( 1570 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1571 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1572 } 1573 1574 // Move the last step to the end of the latch block. This ensures consistent 1575 // placement of all induction updates. 
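// As a sketch (assuming VF = 4, UF = 2 and an integer step of 1): the phi
// %vec.ind starts at <0, 1, 2, 3>, each unroll part adds the splat <4, 4, 4, 4>,
// and the second step.add (%vec.ind + <8, 8, 8, 8>) is the value that is moved
// to the latch below and fed back into the phi.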
1576 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1577 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1578 auto *ICmp = cast<Instruction>(Br->getCondition()); 1579 LastInduction->moveBefore(ICmp); 1580 LastInduction->setName("vec.ind.next"); 1581 1582 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1583 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1584 } 1585 1586 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1587 return Cost->isScalarAfterVectorization(I, VF) || 1588 Cost->isProfitableToScalarize(I, VF); 1589 } 1590 1591 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1592 if (shouldScalarizeInstruction(IV)) 1593 return true; 1594 auto isScalarInst = [&](User *U) -> bool { 1595 auto *I = cast<Instruction>(U); 1596 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1597 }; 1598 return llvm::any_of(IV->users(), isScalarInst); 1599 } 1600 1601 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1602 const InductionDescriptor &ID, const Instruction *EntryVal, 1603 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1604 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1605 "Expected either an induction phi-node or a truncate of it!"); 1606 1607 // This induction variable is not the phi from the original loop but the 1608 // newly-created IV based on the proof that casted Phi is equal to the 1609 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1610 // re-uses the same InductionDescriptor that original IV uses but we don't 1611 // have to do any recording in this case - that is done when original IV is 1612 // processed. 1613 if (isa<TruncInst>(EntryVal)) 1614 return; 1615 1616 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1617 if (Casts.empty()) 1618 return; 1619 // Only the first Cast instruction in the Casts vector is of interest. 1620 // The rest of the Casts (if exist) have no uses outside the 1621 // induction update chain itself. 1622 Instruction *CastInst = *Casts.begin(); 1623 if (Lane < UINT_MAX) 1624 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1625 else 1626 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1627 } 1628 1629 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1630 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1631 "Primary induction variable must have an integer type"); 1632 1633 auto II = Legal->getInductionVars()->find(IV); 1634 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1635 1636 auto ID = II->second; 1637 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1638 1639 // The scalar value to broadcast. This will be derived from the canonical 1640 // induction variable. 1641 Value *ScalarIV = nullptr; 1642 1643 // The value from the original loop to which we are mapping the new induction 1644 // variable. 1645 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1646 1647 // True if we have vectorized the induction variable. 1648 auto VectorizedIV = false; 1649 1650 // Determine if we want a scalar version of the induction variable. This is 1651 // true if the induction variable itself is not widened, or if it has at 1652 // least one user in the loop that is not widened. 1653 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 1654 1655 // Generate code for the induction step. 
Note that induction steps are 1656 // required to be loop-invariant 1657 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 1658 "Induction step should be loop invariant"); 1659 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1660 Value *Step = nullptr; 1661 if (PSE.getSE()->isSCEVable(IV->getType())) { 1662 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1663 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1664 LoopVectorPreHeader->getTerminator()); 1665 } else { 1666 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 1667 } 1668 1669 // Try to create a new independent vector induction variable. If we can't 1670 // create the phi node, we will splat the scalar induction variable in each 1671 // loop iteration. 1672 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 1673 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1674 VectorizedIV = true; 1675 } 1676 1677 // If we haven't yet vectorized the induction variable, or if we will create 1678 // a scalar one, we need to define the scalar induction variable and step 1679 // values. If we were given a truncation type, truncate the canonical 1680 // induction variable and step. Otherwise, derive these values from the 1681 // induction descriptor. 1682 if (!VectorizedIV || NeedsScalarIV) { 1683 ScalarIV = Induction; 1684 if (IV != OldInduction) { 1685 ScalarIV = IV->getType()->isIntegerTy() 1686 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1687 : Builder.CreateCast(Instruction::SIToFP, Induction, 1688 IV->getType()); 1689 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1690 ScalarIV->setName("offset.idx"); 1691 } 1692 if (Trunc) { 1693 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1694 assert(Step->getType()->isIntegerTy() && 1695 "Truncation requires an integer step"); 1696 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1697 Step = Builder.CreateTrunc(Step, TruncType); 1698 } 1699 } 1700 1701 // If we haven't yet vectorized the induction variable, splat the scalar 1702 // induction variable, and build the necessary step vectors. 1703 // TODO: Don't do it unless the vectorized IV is really required. 1704 if (!VectorizedIV) { 1705 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1706 for (unsigned Part = 0; Part < UF; ++Part) { 1707 Value *EntryPart = 1708 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1709 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1710 if (Trunc) 1711 addMetadata(EntryPart, Trunc); 1712 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1713 } 1714 } 1715 1716 // If an induction variable is only used for counting loop iterations or 1717 // calculating addresses, it doesn't need to be widened. Create scalar steps 1718 // that can be used by instructions we will later scalarize. Note that the 1719 // addition of the scalar steps will not increase the number of instructions 1720 // in the loop in the common case prior to InstCombine. We will be trading 1721 // one vector extract for each scalar step. 1722 if (NeedsScalarIV) 1723 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1724 } 1725 1726 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1727 Instruction::BinaryOps BinOp) { 1728 // Create and check the types. 
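// (Illustrative: for an integer induction with VF = 4, StartIdx = 4 and Step = 2,
// the code below produces Val + <8, 10, 12, 14>, i.e. Val + <4, 5, 6, 7> * Step.)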
1729 assert(Val->getType()->isVectorTy() && "Must be a vector"); 1730 int VLen = Val->getType()->getVectorNumElements(); 1731 1732 Type *STy = Val->getType()->getScalarType(); 1733 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1734 "Induction Step must be an integer or FP"); 1735 assert(Step->getType() == STy && "Step has wrong type"); 1736 1737 SmallVector<Constant *, 8> Indices; 1738 1739 if (STy->isIntegerTy()) { 1740 // Create a vector of consecutive numbers from zero to VF. 1741 for (int i = 0; i < VLen; ++i) 1742 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1743 1744 // Add the consecutive indices to the vector value. 1745 Constant *Cv = ConstantVector::get(Indices); 1746 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1747 Step = Builder.CreateVectorSplat(VLen, Step); 1748 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1749 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1750 // which can be found from the original scalar operations. 1751 Step = Builder.CreateMul(Cv, Step); 1752 return Builder.CreateAdd(Val, Step, "induction"); 1753 } 1754 1755 // Floating point induction. 1756 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1757 "Binary Opcode should be specified for FP induction"); 1758 // Create a vector of consecutive numbers from zero to VF. 1759 for (int i = 0; i < VLen; ++i) 1760 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1761 1762 // Add the consecutive indices to the vector value. 1763 Constant *Cv = ConstantVector::get(Indices); 1764 1765 Step = Builder.CreateVectorSplat(VLen, Step); 1766 1767 // Floating point operations had to be 'fast' to enable the induction. 1768 FastMathFlags Flags; 1769 Flags.setFast(); 1770 1771 Value *MulOp = Builder.CreateFMul(Cv, Step); 1772 if (isa<Instruction>(MulOp)) 1773 // Have to check, MulOp may be a constant 1774 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1775 1776 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1777 if (isa<Instruction>(BOp)) 1778 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1779 return BOp; 1780 } 1781 1782 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1783 Instruction *EntryVal, 1784 const InductionDescriptor &ID) { 1785 // We shouldn't have to build scalar steps if we aren't vectorizing. 1786 assert(VF > 1 && "VF should be greater than one"); 1787 1788 // Get the value type and ensure it and the step have the same integer type. 1789 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1790 assert(ScalarIVTy == Step->getType() && 1791 "Val and Step should have the same type"); 1792 1793 // We build scalar steps for both integer and floating-point induction 1794 // variables. Here, we determine the kind of arithmetic we will perform. 1795 Instruction::BinaryOps AddOp; 1796 Instruction::BinaryOps MulOp; 1797 if (ScalarIVTy->isIntegerTy()) { 1798 AddOp = Instruction::Add; 1799 MulOp = Instruction::Mul; 1800 } else { 1801 AddOp = ID.getInductionOpcode(); 1802 MulOp = Instruction::FMul; 1803 } 1804 1805 // Determine the number of scalars we need to generate for each unroll 1806 // iteration. If EntryVal is uniform, we only need to generate the first 1807 // lane. Otherwise, we generate all VF values. 1808 unsigned Lanes = 1809 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 1810 : VF; 1811 // Compute the scalar steps and save the results in VectorLoopValueMap. 
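// For example (assuming VF = 4 and UF = 2 in the non-uniform case), this emits
// eight scalars: ScalarIV + (VF * Part + Lane) * Step for Part in {0, 1} and
// Lane in {0, ..., 3}, i.e. start indices 0 through 7 scaled by Step.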
1812 for (unsigned Part = 0; Part < UF; ++Part) { 1813 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 1814 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 1815 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 1816 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 1817 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 1818 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 1819 } 1820 } 1821 } 1822 1823 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 1824 assert(V != Induction && "The new induction variable should not be used."); 1825 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 1826 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1827 1828 // If we have a stride that is replaced by one, do it here. Defer this for 1829 // the VPlan-native path until we start running Legal checks in that path. 1830 if (!EnableVPlanNativePath && Legal->hasStride(V)) 1831 V = ConstantInt::get(V->getType(), 1); 1832 1833 // If we have a vector mapped to this value, return it. 1834 if (VectorLoopValueMap.hasVectorValue(V, Part)) 1835 return VectorLoopValueMap.getVectorValue(V, Part); 1836 1837 // If the value has not been vectorized, check if it has been scalarized 1838 // instead. If it has been scalarized, and we actually need the value in 1839 // vector form, we will construct the vector values on demand. 1840 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 1841 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 1842 1843 // If we've scalarized a value, that value should be an instruction. 1844 auto *I = cast<Instruction>(V); 1845 1846 // If we aren't vectorizing, we can just copy the scalar map values over to 1847 // the vector map. 1848 if (VF == 1) { 1849 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 1850 return ScalarValue; 1851 } 1852 1853 // Get the last scalar instruction we generated for V and Part. If the value 1854 // is known to be uniform after vectorization, this corresponds to lane zero 1855 // of the Part unroll iteration. Otherwise, the last instruction is the one 1856 // we created for the last vector lane of the Part unroll iteration. 1857 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 1858 auto *LastInst = cast<Instruction>( 1859 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 1860 1861 // Set the insert point after the last scalarized instruction. This ensures 1862 // the insertelement sequence will directly follow the scalar definitions. 1863 auto OldIP = Builder.saveIP(); 1864 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 1865 Builder.SetInsertPoint(&*NewIP); 1866 1867 // However, if we are vectorizing, we need to construct the vector values. 1868 // If the value is known to be uniform after vectorization, we can just 1869 // broadcast the scalar value corresponding to lane zero for each unroll 1870 // iteration. Otherwise, we construct the vector values using insertelement 1871 // instructions. Since the resulting vectors are stored in 1872 // VectorLoopValueMap, we will only generate the insertelements once. 1873 Value *VectorValue = nullptr; 1874 if (Cost->isUniformAfterVectorization(I, VF)) { 1875 VectorValue = getBroadcastInstrs(ScalarValue); 1876 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 1877 } else { 1878 // Initialize packing with insertelements to start from undef. 
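// (Illustrative: for VF = 4 this yields a chain of four insertelement
// instructions, filling lanes 0 through 3 of an initially undef vector, one per
// call to packScalarIntoVectorValue below.)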
1879 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 1880 VectorLoopValueMap.setVectorValue(V, Part, Undef); 1881 for (unsigned Lane = 0; Lane < VF; ++Lane) 1882 packScalarIntoVectorValue(V, {Part, Lane}); 1883 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 1884 } 1885 Builder.restoreIP(OldIP); 1886 return VectorValue; 1887 } 1888 1889 // If this scalar is unknown, assume that it is a constant or that it is 1890 // loop invariant. Broadcast V and save the value for future uses. 1891 Value *B = getBroadcastInstrs(V); 1892 VectorLoopValueMap.setVectorValue(V, Part, B); 1893 return B; 1894 } 1895 1896 Value * 1897 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 1898 const VPIteration &Instance) { 1899 // If the value is not an instruction contained in the loop, it should 1900 // already be scalar. 1901 if (OrigLoop->isLoopInvariant(V)) 1902 return V; 1903 1904 assert(Instance.Lane > 0 1905 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 1906 : true && "Uniform values only have lane zero"); 1907 1908 // If the value from the original loop has not been vectorized, it is 1909 // represented by UF x VF scalar values in the new loop. Return the requested 1910 // scalar value. 1911 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 1912 return VectorLoopValueMap.getScalarValue(V, Instance); 1913 1914 // If the value has not been scalarized, get its entry in VectorLoopValueMap 1915 // for the given unroll part. If this entry is not a vector type (i.e., the 1916 // vectorization factor is one), there is no need to generate an 1917 // extractelement instruction. 1918 auto *U = getOrCreateVectorValue(V, Instance.Part); 1919 if (!U->getType()->isVectorTy()) { 1920 assert(VF == 1 && "Value not scalarized has non-vector type"); 1921 return U; 1922 } 1923 1924 // Otherwise, the value from the original loop has been vectorized and is 1925 // represented by UF vector values. Extract and return the requested scalar 1926 // value from the appropriate vector lane. 1927 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 1928 } 1929 1930 void InnerLoopVectorizer::packScalarIntoVectorValue( 1931 Value *V, const VPIteration &Instance) { 1932 assert(V != Induction && "The new induction variable should not be used."); 1933 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 1934 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1935 1936 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 1937 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 1938 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 1939 Builder.getInt32(Instance.Lane)); 1940 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 1941 } 1942 1943 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 1944 assert(Vec->getType()->isVectorTy() && "Invalid type"); 1945 SmallVector<Constant *, 8> ShuffleMask; 1946 for (unsigned i = 0; i < VF; ++i) 1947 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 1948 1949 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 1950 ConstantVector::get(ShuffleMask), 1951 "reverse"); 1952 } 1953 1954 // Return whether we allow using masked interleave-groups (for dealing with 1955 // strided loads/stores that reside in predicated blocks, or for dealing 1956 // with gaps). 
1957 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 1958 // If an override option has been passed in for interleaved accesses, use it. 1959 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 1960 return EnableMaskedInterleavedMemAccesses; 1961 1962 return TTI.enableMaskedInterleavedAccessVectorization(); 1963 } 1964 1965 // Try to vectorize the interleave group that \p Instr belongs to. 1966 // 1967 // E.g. Translate following interleaved load group (factor = 3): 1968 // for (i = 0; i < N; i+=3) { 1969 // R = Pic[i]; // Member of index 0 1970 // G = Pic[i+1]; // Member of index 1 1971 // B = Pic[i+2]; // Member of index 2 1972 // ... // do something to R, G, B 1973 // } 1974 // To: 1975 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 1976 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 1977 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 1978 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 1979 // 1980 // Or translate following interleaved store group (factor = 3): 1981 // for (i = 0; i < N; i+=3) { 1982 // ... do something to R, G, B 1983 // Pic[i] = R; // Member of index 0 1984 // Pic[i+1] = G; // Member of index 1 1985 // Pic[i+2] = B; // Member of index 2 1986 // } 1987 // To: 1988 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 1989 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 1990 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 1991 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 1992 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 1993 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr, 1994 VectorParts *BlockInMask) { 1995 const InterleaveGroup<Instruction> *Group = 1996 Cost->getInterleavedAccessGroup(Instr); 1997 assert(Group && "Fail to get an interleaved access group."); 1998 1999 // Skip if current instruction is not the insert position. 2000 if (Instr != Group->getInsertPos()) 2001 return; 2002 2003 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2004 Value *Ptr = getLoadStorePointerOperand(Instr); 2005 2006 // Prepare for the vector type of the interleaved load/store. 2007 Type *ScalarTy = getMemInstValueType(Instr); 2008 unsigned InterleaveFactor = Group->getFactor(); 2009 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2010 Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr)); 2011 2012 // Prepare for the new pointers. 2013 setDebugLocFromInst(Builder, Ptr); 2014 SmallVector<Value *, 2> NewPtrs; 2015 unsigned Index = Group->getIndex(Instr); 2016 2017 VectorParts Mask; 2018 bool IsMaskForCondRequired = BlockInMask; 2019 if (IsMaskForCondRequired) { 2020 Mask = *BlockInMask; 2021 // TODO: extend the masked interleaved-group support to reversed access. 2022 assert(!Group->isReverse() && "Reversed masked interleave-group " 2023 "not supported."); 2024 } 2025 2026 // If the group is reverse, adjust the index to refer to the last vector lane 2027 // instead of the first. We adjust the index from the first vector lane, 2028 // rather than directly getting the pointer for lane VF - 1, because the 2029 // pointer operand of the interleaved access is supposed to be uniform. For 2030 // uniform instructions, we're only required to generate a value for the 2031 // first vector lane in each unroll iteration. 
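// (Illustrative: with VF = 4 and an interleave factor of 2, a member at index 1
// has Index adjusted to 1 + 3 * 2 = 7, so that subtracting it below yields the
// lowest address touched by the group in this vector iteration, i.e. member 0
// of the last lane's tuple.)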
2032 if (Group->isReverse())
2033 Index += (VF - 1) * Group->getFactor();
2034
2035 bool InBounds = false;
2036 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
2037 InBounds = gep->isInBounds();
2038
2039 for (unsigned Part = 0; Part < UF; Part++) {
2040 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
2041
2042 // Note that the current instruction could be at any index in the group. We
2043 // need to adjust the address back to the member of index 0.
2044 //
2045 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2046 // b = A[i]; // Member of index 0
2047 // The current pointer points to A[i+1]; adjust it to A[i].
2048 //
2049 // E.g. A[i+1] = a; // Member of index 1
2050 // A[i] = b; // Member of index 0
2051 // A[i+2] = c; // Member of index 2 (Current instruction)
2052 // The current pointer points to A[i+2]; adjust it to A[i].
2053 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2054 if (InBounds)
2055 cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true);
2056
2057 // Cast to the vector pointer type.
2058 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2059 }
2060
2061 setDebugLocFromInst(Builder, Instr);
2062 Value *UndefVec = UndefValue::get(VecTy);
2063
2064 Value *MaskForGaps = nullptr;
2065 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2066 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group);
2067 assert(MaskForGaps && "Mask for Gaps is required but it is null");
2068 }
2069
2070 // Vectorize the interleaved load group.
2071 if (isa<LoadInst>(Instr)) {
2072 // For each unroll part, create a wide load for the group.
2073 SmallVector<Value *, 2> NewLoads;
2074 for (unsigned Part = 0; Part < UF; Part++) {
2075 Instruction *NewLoad;
2076 if (IsMaskForCondRequired || MaskForGaps) {
2077 assert(useMaskedInterleavedAccesses(*TTI) &&
2078 "masked interleaved groups are not allowed.");
2079 Value *GroupMask = MaskForGaps;
2080 if (IsMaskForCondRequired) {
2081 auto *Undefs = UndefValue::get(Mask[Part]->getType());
2082 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF);
2083 Value *ShuffledMask = Builder.CreateShuffleVector(
2084 Mask[Part], Undefs, RepMask, "interleaved.mask");
2085 GroupMask = MaskForGaps
2086 ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2087 MaskForGaps)
2088 : ShuffledMask;
2089 }
2090 NewLoad =
2091 Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(),
2092 GroupMask, UndefVec, "wide.masked.vec");
2093 }
2094 else
2095 NewLoad = Builder.CreateAlignedLoad(NewPtrs[Part],
2096 Group->getAlignment(), "wide.vec");
2097 Group->addMetadata(NewLoad);
2098 NewLoads.push_back(NewLoad);
2099 }
2100
2101 // For each member in the group, shuffle out the appropriate data from the
2102 // wide loads.
2103 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2104 Instruction *Member = Group->getMember(I);
2105
2106 // Skip the gaps in the group.
2107 if (!Member)
2108 continue;
2109
2110 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
2111 for (unsigned Part = 0; Part < UF; Part++) {
2112 Value *StridedVec = Builder.CreateShuffleVector(
2113 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2114
2115 // If this member has a different type, cast the result to its type.
2116 if (Member->getType() != ScalarTy) { 2117 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2118 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2119 } 2120 2121 if (Group->isReverse()) 2122 StridedVec = reverseVector(StridedVec); 2123 2124 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2125 } 2126 } 2127 return; 2128 } 2129 2130 // The sub vector type for current instruction. 2131 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2132 2133 // Vectorize the interleaved store group. 2134 for (unsigned Part = 0; Part < UF; Part++) { 2135 // Collect the stored vector from each member. 2136 SmallVector<Value *, 4> StoredVecs; 2137 for (unsigned i = 0; i < InterleaveFactor; i++) { 2138 // Interleaved store group doesn't allow a gap, so each index has a member 2139 Instruction *Member = Group->getMember(i); 2140 assert(Member && "Fail to get a member from an interleaved store group"); 2141 2142 Value *StoredVec = getOrCreateVectorValue( 2143 cast<StoreInst>(Member)->getValueOperand(), Part); 2144 if (Group->isReverse()) 2145 StoredVec = reverseVector(StoredVec); 2146 2147 // If this member has different type, cast it to a unified type. 2148 2149 if (StoredVec->getType() != SubVT) 2150 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2151 2152 StoredVecs.push_back(StoredVec); 2153 } 2154 2155 // Concatenate all vectors into a wide vector. 2156 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2157 2158 // Interleave the elements in the wide vector. 2159 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2160 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2161 "interleaved.vec"); 2162 2163 Instruction *NewStoreInstr; 2164 if (IsMaskForCondRequired) { 2165 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2166 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2167 Value *ShuffledMask = Builder.CreateShuffleVector( 2168 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2169 NewStoreInstr = Builder.CreateMaskedStore( 2170 IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask); 2171 } 2172 else 2173 NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part], 2174 Group->getAlignment()); 2175 2176 Group->addMetadata(NewStoreInstr); 2177 } 2178 } 2179 2180 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2181 VectorParts *BlockInMask) { 2182 // Attempt to issue a wide load. 2183 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2184 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2185 2186 assert((LI || SI) && "Invalid Load/Store instruction"); 2187 2188 LoopVectorizationCostModel::InstWidening Decision = 2189 Cost->getWideningDecision(Instr, VF); 2190 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2191 "CM decision should be taken at this point"); 2192 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2193 return vectorizeInterleaveGroup(Instr); 2194 2195 Type *ScalarDataTy = getMemInstValueType(Instr); 2196 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2197 Value *Ptr = getLoadStorePointerOperand(Instr); 2198 unsigned Alignment = getLoadStoreAlignment(Instr); 2199 // An alignment of 0 means target abi alignment. We need to use the scalar's 2200 // target abi alignment in such a case. 
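// (For example, an i32 access with no explicit alignment would typically fall
// back to DL.getABITypeAlignment(i32), i.e. 4 bytes on common targets.)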
2201 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2202 if (!Alignment) 2203 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2204 unsigned AddressSpace = getLoadStoreAddressSpace(Instr); 2205 2206 // Determine if the pointer operand of the access is either consecutive or 2207 // reverse consecutive. 2208 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2209 bool ConsecutiveStride = 2210 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2211 bool CreateGatherScatter = 2212 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2213 2214 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2215 // gather/scatter. Otherwise Decision should have been to Scalarize. 2216 assert((ConsecutiveStride || CreateGatherScatter) && 2217 "The instruction should be scalarized"); 2218 2219 // Handle consecutive loads/stores. 2220 if (ConsecutiveStride) 2221 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 2222 2223 VectorParts Mask; 2224 bool isMaskRequired = BlockInMask; 2225 if (isMaskRequired) 2226 Mask = *BlockInMask; 2227 2228 bool InBounds = false; 2229 if (auto *gep = dyn_cast<GetElementPtrInst>( 2230 getLoadStorePointerOperand(Instr)->stripPointerCasts())) 2231 InBounds = gep->isInBounds(); 2232 2233 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2234 // Calculate the pointer for the specific unroll-part. 2235 GetElementPtrInst *PartPtr = nullptr; 2236 2237 if (Reverse) { 2238 // If the address is consecutive but reversed, then the 2239 // wide store needs to start at the last vector element. 2240 PartPtr = cast<GetElementPtrInst>( 2241 Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF))); 2242 PartPtr->setIsInBounds(InBounds); 2243 PartPtr = cast<GetElementPtrInst>( 2244 Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF))); 2245 PartPtr->setIsInBounds(InBounds); 2246 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2247 Mask[Part] = reverseVector(Mask[Part]); 2248 } else { 2249 PartPtr = cast<GetElementPtrInst>( 2250 Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF))); 2251 PartPtr->setIsInBounds(InBounds); 2252 } 2253 2254 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2255 }; 2256 2257 // Handle Stores: 2258 if (SI) { 2259 setDebugLocFromInst(Builder, SI); 2260 2261 for (unsigned Part = 0; Part < UF; ++Part) { 2262 Instruction *NewSI = nullptr; 2263 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 2264 if (CreateGatherScatter) { 2265 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2266 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2267 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2268 MaskPart); 2269 } else { 2270 if (Reverse) { 2271 // If we store to reverse consecutive memory locations, then we need 2272 // to reverse the order of elements in the stored value. 2273 StoredVal = reverseVector(StoredVal); 2274 // We don't want to update the value in the map as it might be used in 2275 // another expression. So don't call resetVectorValue(StoredVal). 2276 } 2277 auto *VecPtr = CreateVecPtr(Part, Ptr); 2278 if (isMaskRequired) 2279 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2280 Mask[Part]); 2281 else 2282 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2283 } 2284 addMetadata(NewSI, SI); 2285 } 2286 return; 2287 } 2288 2289 // Handle loads. 
2290 assert(LI && "Must have a load instruction"); 2291 setDebugLocFromInst(Builder, LI); 2292 for (unsigned Part = 0; Part < UF; ++Part) { 2293 Value *NewLI; 2294 if (CreateGatherScatter) { 2295 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2296 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2297 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2298 nullptr, "wide.masked.gather"); 2299 addMetadata(NewLI, LI); 2300 } else { 2301 auto *VecPtr = CreateVecPtr(Part, Ptr); 2302 if (isMaskRequired) 2303 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2304 UndefValue::get(DataTy), 2305 "wide.masked.load"); 2306 else 2307 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2308 2309 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2310 addMetadata(NewLI, LI); 2311 if (Reverse) 2312 NewLI = reverseVector(NewLI); 2313 } 2314 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2315 } 2316 } 2317 2318 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2319 const VPIteration &Instance, 2320 bool IfPredicateInstr) { 2321 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2322 2323 setDebugLocFromInst(Builder, Instr); 2324 2325 // Does this instruction return a value ? 2326 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2327 2328 Instruction *Cloned = Instr->clone(); 2329 if (!IsVoidRetTy) 2330 Cloned->setName(Instr->getName() + ".cloned"); 2331 2332 // Replace the operands of the cloned instructions with their scalar 2333 // equivalents in the new loop. 2334 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2335 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2336 Cloned->setOperand(op, NewOp); 2337 } 2338 addNewMetadata(Cloned, Instr); 2339 2340 // Place the cloned scalar in the new loop. 2341 Builder.Insert(Cloned); 2342 2343 // Add the cloned scalar to the scalar map entry. 2344 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2345 2346 // If we just cloned a new assumption, add it the assumption cache. 2347 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2348 if (II->getIntrinsicID() == Intrinsic::assume) 2349 AC->registerAssumption(II); 2350 2351 // End if-block. 2352 if (IfPredicateInstr) 2353 PredicatedInstructions.push_back(Cloned); 2354 } 2355 2356 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2357 Value *End, Value *Step, 2358 Instruction *DL) { 2359 BasicBlock *Header = L->getHeader(); 2360 BasicBlock *Latch = L->getLoopLatch(); 2361 // As we're just creating this loop, it's possible no latch exists 2362 // yet. If so, use the header as this will be a single block loop. 2363 if (!Latch) 2364 Latch = Header; 2365 2366 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2367 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2368 setDebugLocFromInst(Builder, OldInst); 2369 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2370 2371 Builder.SetInsertPoint(Latch->getTerminator()); 2372 setDebugLocFromInst(Builder, OldInst); 2373 2374 // Create i+1 and fill the PHINode. 2375 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2376 Induction->addIncoming(Start, L->getLoopPreheader()); 2377 Induction->addIncoming(Next, Latch); 2378 // Create the compare. 2379 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2380 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2381 2382 // Now we have two terminators. Remove the old one from the block. 
2383 Latch->getTerminator()->eraseFromParent(); 2384 2385 return Induction; 2386 } 2387 2388 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2389 if (TripCount) 2390 return TripCount; 2391 2392 assert(L && "Create Trip Count for null loop."); 2393 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2394 // Find the loop boundaries. 2395 ScalarEvolution *SE = PSE.getSE(); 2396 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2397 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2398 "Invalid loop count"); 2399 2400 Type *IdxTy = Legal->getWidestInductionType(); 2401 assert(IdxTy && "No type for induction"); 2402 2403 // The exit count might have the type of i64 while the phi is i32. This can 2404 // happen if we have an induction variable that is sign extended before the 2405 // compare. The only way that we get a backedge taken count is that the 2406 // induction variable was signed and as such will not overflow. In such a case 2407 // truncation is legal. 2408 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2409 IdxTy->getPrimitiveSizeInBits()) 2410 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2411 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2412 2413 // Get the total trip count from the count by adding 1. 2414 const SCEV *ExitCount = SE->getAddExpr( 2415 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2416 2417 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2418 2419 // Expand the trip count and place the new instructions in the preheader. 2420 // Notice that the pre-header does not change, only the loop body. 2421 SCEVExpander Exp(*SE, DL, "induction"); 2422 2423 // Count holds the overall loop count (N). 2424 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2425 L->getLoopPreheader()->getTerminator()); 2426 2427 if (TripCount->getType()->isPointerTy()) 2428 TripCount = 2429 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2430 L->getLoopPreheader()->getTerminator()); 2431 2432 return TripCount; 2433 } 2434 2435 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2436 if (VectorTripCount) 2437 return VectorTripCount; 2438 2439 Value *TC = getOrCreateTripCount(L); 2440 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2441 2442 Type *Ty = TC->getType(); 2443 Constant *Step = ConstantInt::get(Ty, VF * UF); 2444 2445 // If the tail is to be folded by masking, round the number of iterations N 2446 // up to a multiple of Step instead of rounding down. This is done by first 2447 // adding Step-1 and then rounding down. Note that it's ok if this addition 2448 // overflows: the vector induction variable will eventually wrap to zero given 2449 // that it starts at zero and its Step is a power of two; the loop will then 2450 // exit, with the last early-exit vector comparison also producing all-true. 2451 if (Cost->foldTailByMasking()) { 2452 assert(isPowerOf2_32(VF * UF) && 2453 "VF*UF must be a power of 2 when folding tail by masking"); 2454 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2455 } 2456 2457 // Now we need to generate the expression for the part of the loop that the 2458 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2459 // iterations are not required for correctness, or N - Step, otherwise. 
Step 2460 // is equal to the vectorization factor (number of SIMD elements) times the 2461 // unroll factor (number of SIMD instructions). 2462 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2463 2464 // If there is a non-reversed interleaved group that may speculatively access 2465 // memory out-of-bounds, we need to ensure that there will be at least one 2466 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2467 // the trip count, we set the remainder to be equal to the step. If the step 2468 // does not evenly divide the trip count, no adjustment is necessary since 2469 // there will already be scalar iterations. Note that the minimum iterations 2470 // check ensures that N >= Step. 2471 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2472 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2473 R = Builder.CreateSelect(IsZero, Step, R); 2474 } 2475 2476 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2477 2478 return VectorTripCount; 2479 } 2480 2481 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2482 const DataLayout &DL) { 2483 // Verify that V is a vector type with same number of elements as DstVTy. 2484 unsigned VF = DstVTy->getNumElements(); 2485 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2486 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2487 Type *SrcElemTy = SrcVecTy->getElementType(); 2488 Type *DstElemTy = DstVTy->getElementType(); 2489 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2490 "Vector elements must have same size"); 2491 2492 // Do a direct cast if element types are castable. 2493 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2494 return Builder.CreateBitOrPointerCast(V, DstVTy); 2495 } 2496 // V cannot be directly casted to desired vector type. 2497 // May happen when V is a floating point vector but DstVTy is a vector of 2498 // pointers or vice-versa. Handle this using a two-step bitcast using an 2499 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2500 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2501 "Only one type should be a pointer type"); 2502 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2503 "Only one type should be a floating point type"); 2504 Type *IntTy = 2505 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2506 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2507 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2508 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2509 } 2510 2511 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2512 BasicBlock *Bypass) { 2513 Value *Count = getOrCreateTripCount(L); 2514 BasicBlock *BB = L->getLoopPreheader(); 2515 IRBuilder<> Builder(BB->getTerminator()); 2516 2517 // Generate code to check if the loop's trip count is less than VF * UF, or 2518 // equal to it in case a scalar epilogue is required; this implies that the 2519 // vector trip count is zero. This check also covers the case where adding one 2520 // to the backedge-taken count overflowed leading to an incorrect trip count 2521 // of zero. In this case we will also jump to the scalar loop. 2522 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2523 : ICmpInst::ICMP_ULT; 2524 2525 // If tail is to be folded, vector loop takes care of all iterations. 
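// (Illustrative: with VF * UF = 8 and a trip count of 13, folding the tail means
// the masked vector loop runs ceil(13 / 8) = 2 iterations and no scalar
// remainder is needed, so the minimum-iteration check below is simply 'false'.)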
2526 Value *CheckMinIters = Builder.getFalse(); 2527 if (!Cost->foldTailByMasking()) 2528 CheckMinIters = Builder.CreateICmp( 2529 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2530 "min.iters.check"); 2531 2532 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2533 // Update dominator tree immediately if the generated block is a 2534 // LoopBypassBlock because SCEV expansions to generate loop bypass 2535 // checks may query it before the current function is finished. 2536 DT->addNewBlock(NewBB, BB); 2537 if (L->getParentLoop()) 2538 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2539 ReplaceInstWithInst(BB->getTerminator(), 2540 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 2541 LoopBypassBlocks.push_back(BB); 2542 } 2543 2544 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2545 BasicBlock *BB = L->getLoopPreheader(); 2546 2547 // Generate the code to check that the SCEV assumptions that we made. 2548 // We want the new basic block to start at the first instruction in a 2549 // sequence of instructions that form a check. 2550 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2551 "scev.check"); 2552 Value *SCEVCheck = 2553 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator()); 2554 2555 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2556 if (C->isZero()) 2557 return; 2558 2559 assert(!Cost->foldTailByMasking() && 2560 "Cannot SCEV check stride or overflow when folding tail"); 2561 // Create a new block containing the stride check. 2562 BB->setName("vector.scevcheck"); 2563 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2564 // Update dominator tree immediately if the generated block is a 2565 // LoopBypassBlock because SCEV expansions to generate loop bypass 2566 // checks may query it before the current function is finished. 2567 DT->addNewBlock(NewBB, BB); 2568 if (L->getParentLoop()) 2569 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2570 ReplaceInstWithInst(BB->getTerminator(), 2571 BranchInst::Create(Bypass, NewBB, SCEVCheck)); 2572 LoopBypassBlocks.push_back(BB); 2573 AddedSafetyChecks = true; 2574 } 2575 2576 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2577 // VPlan-native path does not do any analysis for runtime checks currently. 2578 if (EnableVPlanNativePath) 2579 return; 2580 2581 BasicBlock *BB = L->getLoopPreheader(); 2582 2583 // Generate the code that checks in runtime if arrays overlap. We put the 2584 // checks into a separate block to make the more common case of few elements 2585 // faster. 2586 Instruction *FirstCheckInst; 2587 Instruction *MemRuntimeCheck; 2588 std::tie(FirstCheckInst, MemRuntimeCheck) = 2589 Legal->getLAI()->addRuntimeChecks(BB->getTerminator()); 2590 if (!MemRuntimeCheck) 2591 return; 2592 2593 assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail"); 2594 // Create a new block containing the memory check. 2595 BB->setName("vector.memcheck"); 2596 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph"); 2597 // Update dominator tree immediately if the generated block is a 2598 // LoopBypassBlock because SCEV expansions to generate loop bypass 2599 // checks may query it before the current function is finished. 
2600 DT->addNewBlock(NewBB, BB); 2601 if (L->getParentLoop()) 2602 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2603 ReplaceInstWithInst(BB->getTerminator(), 2604 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2605 LoopBypassBlocks.push_back(BB); 2606 AddedSafetyChecks = true; 2607 2608 // We currently don't use LoopVersioning for the actual loop cloning but we 2609 // still use it to add the noalias metadata. 2610 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2611 PSE.getSE()); 2612 LVer->prepareNoAliasMetadata(); 2613 } 2614 2615 Value *InnerLoopVectorizer::emitTransformedIndex( 2616 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2617 const InductionDescriptor &ID) const { 2618 2619 SCEVExpander Exp(*SE, DL, "induction"); 2620 auto Step = ID.getStep(); 2621 auto StartValue = ID.getStartValue(); 2622 assert(Index->getType() == Step->getType() && 2623 "Index type does not match StepValue type"); 2624 2625 // Note: the IR at this point is broken. We cannot use SE to create any new 2626 // SCEV and then expand it, hoping that SCEV's simplification will give us 2627 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2628 // lead to various SCEV crashes. So all we can do is to use builder and rely 2629 // on InstCombine for future simplifications. Here we handle some trivial 2630 // cases only. 2631 auto CreateAdd = [&B](Value *X, Value *Y) { 2632 assert(X->getType() == Y->getType() && "Types don't match!"); 2633 if (auto *CX = dyn_cast<ConstantInt>(X)) 2634 if (CX->isZero()) 2635 return Y; 2636 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2637 if (CY->isZero()) 2638 return X; 2639 return B.CreateAdd(X, Y); 2640 }; 2641 2642 auto CreateMul = [&B](Value *X, Value *Y) { 2643 assert(X->getType() == Y->getType() && "Types don't match!"); 2644 if (auto *CX = dyn_cast<ConstantInt>(X)) 2645 if (CX->isOne()) 2646 return Y; 2647 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2648 if (CY->isOne()) 2649 return X; 2650 return B.CreateMul(X, Y); 2651 }; 2652 2653 switch (ID.getKind()) { 2654 case InductionDescriptor::IK_IntInduction: { 2655 assert(Index->getType() == StartValue->getType() && 2656 "Index type does not match StartValue type"); 2657 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2658 return B.CreateSub(StartValue, Index); 2659 auto *Offset = CreateMul( 2660 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2661 return CreateAdd(StartValue, Offset); 2662 } 2663 case InductionDescriptor::IK_PtrInduction: { 2664 assert(isa<SCEVConstant>(Step) && 2665 "Expected constant step for pointer induction"); 2666 return B.CreateGEP( 2667 nullptr, StartValue, 2668 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2669 &*B.GetInsertPoint()))); 2670 } 2671 case InductionDescriptor::IK_FpInduction: { 2672 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2673 auto InductionBinOp = ID.getInductionBinOp(); 2674 assert(InductionBinOp && 2675 (InductionBinOp->getOpcode() == Instruction::FAdd || 2676 InductionBinOp->getOpcode() == Instruction::FSub) && 2677 "Original bin op should be defined for FP induction"); 2678 2679 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2680 2681 // Floating point operations had to be 'fast' to enable the induction. 
2682 FastMathFlags Flags; 2683 Flags.setFast(); 2684 2685 Value *MulExp = B.CreateFMul(StepValue, Index); 2686 if (isa<Instruction>(MulExp)) 2687 // We have to check, the MulExp may be a constant. 2688 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2689 2690 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2691 "induction"); 2692 if (isa<Instruction>(BOp)) 2693 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2694 2695 return BOp; 2696 } 2697 case InductionDescriptor::IK_NoInduction: 2698 return nullptr; 2699 } 2700 llvm_unreachable("invalid enum"); 2701 } 2702 2703 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2704 /* 2705 In this function we generate a new loop. The new loop will contain 2706 the vectorized instructions while the old loop will continue to run the 2707 scalar remainder. 2708 2709 [ ] <-- loop iteration number check. 2710 / | 2711 / v 2712 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2713 | / | 2714 | / v 2715 || [ ] <-- vector pre header. 2716 |/ | 2717 | v 2718 | [ ] \ 2719 | [ ]_| <-- vector loop. 2720 | | 2721 | v 2722 | -[ ] <--- middle-block. 2723 | / | 2724 | / v 2725 -|- >[ ] <--- new preheader. 2726 | | 2727 | v 2728 | [ ] \ 2729 | [ ]_| <-- old scalar loop to handle remainder. 2730 \ | 2731 \ v 2732 >[ ] <-- exit block. 2733 ... 2734 */ 2735 2736 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2737 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2738 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2739 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2740 assert(VectorPH && "Invalid loop structure"); 2741 assert(ExitBlock && "Must have an exit block"); 2742 2743 // Some loops have a single integer induction variable, while other loops 2744 // don't. One example is c++ iterators that often have multiple pointer 2745 // induction variables. In the code below we also support a case where we 2746 // don't have a single induction variable. 2747 // 2748 // We try to obtain an induction variable from the original loop as hard 2749 // as possible. However if we don't find one that: 2750 // - is an integer 2751 // - counts from zero, stepping by one 2752 // - is the size of the widest induction variable type 2753 // then we create a new one. 2754 OldInduction = Legal->getPrimaryInduction(); 2755 Type *IdxTy = Legal->getWidestInductionType(); 2756 2757 // Split the single block loop into the two loop structure described above. 2758 BasicBlock *VecBody = 2759 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2760 BasicBlock *MiddleBlock = 2761 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2762 BasicBlock *ScalarPH = 2763 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2764 2765 // Create and register the new vector loop. 2766 Loop *Lp = LI->AllocateLoop(); 2767 Loop *ParentLoop = OrigLoop->getParentLoop(); 2768 2769 // Insert the new loop into the loop nest and register the new basic blocks 2770 // before calling any utilities such as SCEV that require valid LoopInfo. 2771 if (ParentLoop) { 2772 ParentLoop->addChildLoop(Lp); 2773 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2774 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2775 } else { 2776 LI->addTopLevelLoop(Lp); 2777 } 2778 Lp->addBasicBlockToLoop(VecBody, *LI); 2779 2780 // Find the loop boundaries. 2781 Value *Count = getOrCreateTripCount(Lp); 2782 2783 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2784 2785 // Now, compare the new count to zero. 
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow,
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
  emitMinimumIterationCountCheck(Lp, ScalarPH);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, ScalarPH);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  emitMemRuntimeChecks(Lp, ScalarPH);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of the PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.

  // This variable saves the new starting index for the scalar loop. It is used
  // to test if there are any tail iterations left once the vector loop has
  // completed.
  LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
  for (auto &InductionEntry : *List) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal = PHINode::Create(
        OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
    // Copy original phi DL over to the new one.
    BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
    Value *&EndValue = IVEndValues[OrigPhi];
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = CountRoundDown;
    } else {
      IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
      EndValue->setName("ind.end");
    }

    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, MiddleBlock);

    // Fix the scalar body counter (PHI node).
    unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);

    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);
    OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
  }

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
  // If the tail is to be folded, we know we don't need to run the remainder.
  Value *CmpN = Builder.getTrue();
  if (!Cost->foldTailByMasking())
    CmpN =
        CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                        CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  ReplaceInstWithInst(MiddleBlock->getTerminator(),
                      BranchInst::Create(ExitBlock, ScalarPH, CmpN));

  // Get ready to start creating new instructions into the vectorized body.
  Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());

  // Save the state.
  LoopVectorPreHeader = Lp->getLoopPreheader();
  LoopScalarPreHeader = ScalarPH;
  LoopMiddleBlock = MiddleBlock;
  LoopExitBlock = ExitBlock;
  LoopVectorBody = VecBody;
  LoopScalarBody = OldBasicBlock;

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});
  if (VectorizedLoopID.hasValue()) {
    Lp->setLoopID(VectorizedLoopID.getValue());

    // Do not setAlreadyVectorized if loop attributes have been defined
    // explicitly.
    return LoopVectorPreHeader;
  }

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();

  return LoopVectorPreHeader;
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the value that feeds into the phi from
  // the loop latch) and those that use the penultimate value (the PHI). We
  // allow both, but they obviously have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
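  // For instance (illustrative, not from the source): in
  //   for (i = 0; i != n; ++i) {...}
  // with both i and i.next used after the loop, the external user of i.next
  // sees the last value n, while the external user of the phi i sees the
  // penultimate value n - 1, i.e. Start + Step * (CRD - 1).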
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

/// Estimate the overhead of scalarizing an instruction. This is a
/// convenience wrapper for the type-based getScalarizationOverhead API.
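/// As an illustrative sketch (the real numbers come from TTI): scalarizing an
/// instruction at VF = 4 pays for extracting each of the four lanes of its
/// vector operands and for inserting the four scalar results back into a
/// vector, unless the target supports efficient vector element loads/stores
/// or keeps addresses scalar.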
static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
                                         const TargetTransformInfo &TTI) {
  if (VF == 1)
    return 0;

  unsigned Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) ||
       !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(RetTy, true, false);

  // Some targets keep addresses scalar.
  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
    return Cost;

  if (CallInst *CI = dyn_cast<CallInst>(I)) {
    SmallVector<const Value *, 4> Operands(CI->arg_operands());
    Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
  } else if (!isa<StoreInst>(I) ||
             !TTI.supportsEfficientVectorElementLoadStore()) {
    SmallVector<const Value *, 4> Operands(I->operand_values());
    Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
  }

  return Cost;
}

// Estimate the cost of a call instruction CI if it were vectorized with
// factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed. The flag NeedToScalarize shows if the call needs
// to be scalarized - i.e., either a vector version isn't available, or it is
// too expensive.
static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
                                  const TargetTransformInfo &TTI,
                                  const TargetLibraryInfo *TLI,
                                  bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  StringRef FnName = CI->getCalledFunction()->getName();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of the scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
  if (VF == 1)
    return ScalarCallCost;

  // Compute the corresponding vector type for the return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute the costs of unpacking the argument values for the scalar calls
  // and packing the return values into a vector.
  unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);

  unsigned Cost = ScalarCallCost * VF + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently found
  // cost is the cost we need to return.
  NeedToScalarize = true;
  if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    return VectorCallCost;
  }
  return Cost;
}

// Estimate the cost of an intrinsic call instruction CI if it were vectorized
// with factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed.
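// A small worked example with made-up costs: at VF = 4, if one scalar call
// costs 10 and the extract/insert overhead is 6, the scalarized estimate in
// getVectorCallCost is 10 * 4 + 6 = 46; a vectorized library call modeled at
// 20 would then win. widenInstruction later compares this intrinsic estimate
// against that call cost to pick between the intrinsic and library forms.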
static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
                                       const TargetTransformInfo &TTI,
                                       const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");

  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<Value *, 4> Operands(CI->arg_operands());
  return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from VectorLoopValueMap indicates that it
    // wasn't vectorized.
    if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = getOrCreateVectorValue(KV.first, Part);
      if (Erased.find(I) != Erased.end() || I->use_empty() ||
          !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
                                          OriginalTy->getVectorNumElements());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));

        // Any wrapping introduced by shrinking this operation shouldn't be
        // considered undefined behavior. So, we can't unconditionally copy
        // arithmetic wrapping flags to NewI.
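        // For example (illustrative): if an i32 'add nsw' is shrunk to i8
        // because only the low 8 bits are demanded, the narrow add may wrap
        // where the wide one provably did not, so keeping nsw/nuw on NewI
        // could introduce undefined behavior that the original lacked.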
        cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        // If we don't know what to do, be conservative and don't do anything.
        continue;
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
    }
  }

  // We'll have created a bunch of ZExts that are now dead. Clean up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    // If the value wasn't vectorized, we must maintain the original scalar
    // type. The absence of the value from VectorLoopValueMap indicates that it
    // wasn't vectorized.
    if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
      continue;
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *I = getOrCreateVectorValue(KV.first, Part);
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
      }
    }
  }
}

void InnerLoopVectorizer::fixVectorizedLoop() {
  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // Fix widened non-induction PHIs by setting up the PHI operands.
  if (OrigPHIsToFix.size()) {
    assert(EnableVPlanNativePath &&
           "Unexpected non-induction PHIs for fixup in non VPlan-native path");
    fixNonInductionPHIs();
  }

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in the loop. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  fixCrossIterationPHIs();

  // Update the dominator tree.
  //
  // FIXME: After creating the structure of the new loop, the dominator tree is
  //        no longer up-to-date, and it remains that way until we update it
  //        here. An out-of-date dominator tree is problematic for SCEV,
  //        because SCEVExpander uses it to guide code generation. The
  //        vectorizer uses SCEVExpanders in several places. Instead, we should
  //        keep the dominator tree up-to-date as we go.
  updateAnalysis();

  // Fix up external users of the induction variables.
  for (auto &Entry : *Legal->getInductionVars())
    fixupIVUsers(Entry.first, Entry.second,
                 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
                 IVEndValues[Entry.first], LoopMiddleBlock);

  fixLCSSAPHIs();
  for (Instruction *PI : PredicatedInstructions)
    sinkScalarOperands(&*PI);

  // Remove redundant induction instructions.
  cse(LoopVectorBody);
}

void InnerLoopVectorizer::fixCrossIterationPHIs() {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes. At this point every instruction in the
  // original loop is widened to a vector form so we can use them to construct
  // the incoming edges.
  for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
    // Handle first-order recurrences and reductions that need to be fixed.
    if (Legal->isFirstOrderRecurrence(&Phi))
      fixFirstOrderRecurrence(&Phi);
    else if (Legal->isReductionVariable(&Phi))
      fixReduction(&Phi);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a".
  // For this loop, the shorthand scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  Builder.SetInsertPoint(
      cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector or a loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value of the last part UF - 1. It appears last
  // among all unrolled iterations, due to the order of their construction.
  Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);

  // Set the insertion point after the previous value if it is an instruction.
  // Note that the previous value may have been constant-folded so it is not
  // guaranteed to be an instruction in the vector loop. Also, if the previous
  // value is a phi node, we should insert after all the phi nodes to avoid
  // breaking basic block verification.
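  // For instance (illustrative): if Previous is itself a header phi such as
  //   %prev = phi <4 x i32> [ ... ], [ ... ]
  // placing the shuffle directly after %prev but before the remaining phi
  // nodes would violate the rule that all phi nodes must be grouped at the
  // top of the block, so we skip to the first insertion point instead.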
  if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
      isa<PHINode>(PreviousLastPart))
    Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
  else
    Builder.SetInsertPoint(
        &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
  SmallVector<Constant *, 8> ShuffleMask(VF);
  ShuffleMask[0] = Builder.getInt32(VF - 1);
  for (unsigned I = 1; I < VF; ++I)
    ShuffleMask[I] = Builder.getInt32(I + VF - 1);

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
    Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
    auto *Shuffle =
        VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
                                             ConstantVector::get(ShuffleMask))
               : Incoming;
    PhiPart->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiPart)->eraseFromParent();
    VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
    Incoming = PreviousPart;
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *ExtractForScalar = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    ExtractForScalar = Builder.CreateExtractElement(
        ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
  }
  // Extract the second-last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the LoopMiddleBlock,
  // when the scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF > 1)
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
  // `Incoming`, i.e. part UF - 2. This is analogous to the vectorized case
  // above: extracting the second-last element when VF > 1.
  else if (UF > 1)
    ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find all the phi nodes for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getIncomingValue(0) == Phi) {
      LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  Constant *Zero = Builder.getInt32(0);

  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(Phi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

  RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
      RdxDesc.getMinMaxRecurrenceKind();
  setDebugLocFromInst(Builder, ReductionStartValue);

  // We need to generate a reduction vector from the incoming scalar.
  // To do so, we need to generate the 'identity' vector and override
  // one of the elements with the incoming scalar reduction. We need
  // to do it in the vector-loop preheader.
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();

  // Find the reduction identity variable. Zero for addition, or, and xor;
  // one for multiplication; -1 for and.
  Value *Identity;
  Value *VectorStart;
  if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
      RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
    if (VF == 1) {
      VectorStart = Identity = ReductionStartValue;
    } else {
      VectorStart = Identity =
          Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
    }
  } else {
    // Handle other reduction kinds:
    Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
        RK, VecTy->getScalarType());
    if (VF == 1) {
      Identity = Iden;
      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart = ReductionStartValue;
    } else {
      Identity = ConstantVector::getSplat(VF, Iden);

      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart =
          Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
    }
  }

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop invariant values.
  BasicBlock *Latch = OrigLoop->getLoopLatch();
  Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
    Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ? VectorStart : Identity;
    cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
    cast<PHINode>(VecRdxPhi)
        ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  }

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(Builder, LoopExitInst);

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
  if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
    Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
    Builder.SetInsertPoint(
        LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
    VectorParts RdxParts(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
      Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                        : Builder.CreateZExt(Trunc, VecTy);
      for (Value::user_iterator UI = RdxParts[Part]->user_begin();
           UI != RdxParts[Part]->user_end();)
        if (*UI != Trunc) {
          (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
          RdxParts[Part] = Extnd;
        } else {
          ++UI;
        }
    }
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
    for (unsigned Part = 0; Part < UF; ++Part) {
      RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
      VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
    }
  }

  // Reduce all of the unrolled parts into a single vector.
  Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
  unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
  setDebugLocFromInst(Builder, ReducedPartRdx);
  for (unsigned Part = 1; Part < UF; ++Part) {
    Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
    if (Op != Instruction::ICmp && Op != Instruction::FCmp)
      // Floating point operations had to be 'fast' to enable the reduction.
      ReducedPartRdx = addFastMathFlag(
          Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
                              ReducedPartRdx, "bin.rdx"));
    else
      ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx,
                                      RdxPart);
  }

  if (VF > 1) {
    bool NoNaN = Legal->hasFunNoNaNAttr();
    ReducedPartRdx =
        createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
    // If the reduction can be performed in a smaller type, we need to extend
    // the reduction to the wider type before we branch to the original loop.
    if (Phi->getType() != RdxDesc.getRecurrenceType())
      ReducedPartRdx =
          RdxDesc.isSigned()
              ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
              : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
  }

  // Create a phi node that merges control-flow from the backedge-taken check
  // block and the middle block.
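  // In shorthand (illustrative), for a sum reduction this produces:
  //
  //   scalar.ph:
  //     bc.merge.rdx = phi [ start, bypass blocks ], [ rdx, middle.block ]
  //
  // so the scalar remainder loop resumes from the partial sum computed by the
  // vector loop, or from the original start value if the vector loop was
  // skipped.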
  PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
                                        LoopScalarPreHeader->getTerminator());
  for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
    BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
  BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

  // Now, we need to fix the users of the reduction variable
  // inside and outside of the scalar remainder loop.
  // We know that the loop is in LCSSA form. We need to update the
  // PHI nodes in the exit blocks.
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    // All PHINodes need to have a single entry edge, or two if
    // we already fixed them.
    assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");

    // We found a reduction value exit-PHI. Update it with the
    // incoming bypass edge.
    if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
      LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
  } // end of the LCSSA phi scan.

  // Fix the scalar loop reduction variable with the incoming reduction sum
  // from the vector body and from the backedge value.
  int IncomingEdgeBlockIdx =
      Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
  assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
  // Pick the other block.
  int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
  Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
  Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
}

void InnerLoopVectorizer::fixLCSSAPHIs() {
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getNumIncomingValues() == 1) {
      auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
      // Non-instruction incoming values will have only one value.
      unsigned LastLane = 0;
      if (isa<Instruction>(IncomingValue))
        LastLane = Cost->isUniformAfterVectorization(
                       cast<Instruction>(IncomingValue), VF)
                       ? 0
                       : VF - 1;
      // Can be a loop invariant incoming value or the last scalar value to be
      // extracted from the vectorized loop.
      Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
      Value *lastIncomingValue =
          getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
      LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
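  // For example (illustrative): given 'phi [ %x, %pred.block ]', the use of
  // %x is treated as occurring at the end of %pred.block rather than in the
  // block containing the phi, which is what allows sinking %x's definition
  // into %pred.block.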
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm terminates when a
  // full pass over the worklist fails to sink any instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs() {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    PHINode *NewPhi =
        cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
    unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();

    SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
        predecessors(OrigPhi->getParent()));
    SmallVector<BasicBlock *, 2> VectorBBPredecessors(
        predecessors(NewPhi->getParent()));
    assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
           "Scalar and Vector BB should have the same number of predecessors");

    // The insertion point in Builder may be invalidated by the time we get
    // here. Force the Builder insertion point to something valid so that we do
    // not run into issues during insertion point restore in
    // getOrCreateVectorValue calls below.
    Builder.SetInsertPoint(NewPhi);

    // The predecessor order is preserved and we can rely on mapping between
    // scalar and vector block predecessors.
    for (unsigned i = 0; i < NumIncomingValues; ++i) {
      BasicBlock *NewPredBB = VectorBBPredecessors[i];

      // When looking up the new scalar/vector values to fix up, use incoming
      // values from the original phi.
      Value *ScIncV =
          OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]);

      // The scalar incoming value may need a broadcast.
      Value *NewIncV = getOrCreateVectorValue(ScIncV, 0);
      NewPhi->addIncoming(NewIncV, NewPredBB);
    }
  }
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF) {
  PHINode *P = cast<PHINode>(PN);
  if (EnableVPlanNativePath) {
    // Currently we enter here in the VPlan-native path for non-induction
    // PHIs where all control flow is uniform. We simply widen these PHIs.
    // Create a vector phi with no operands - the vector phi operands will be
    // set at the end of vector code generation.
    Type *VecTy =
        (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
    Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
    VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
    OrigPHIsToFix.push_back(P);

    return;
  }

  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
    }
    return;
  }

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw
  //        flags, which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
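    // As an illustration (not from the source): with VF = 4, UF = 2, and a
    // non-uniform pointer induction, this emits eight scalar GEPs per vector
    // iteration - part 0 covers indices PtrInd + 0..3 and part 1 covers
    // PtrInd + 4..7 - each named "next.gep".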
    for (unsigned Part = 0; Part < UF; ++Part) {
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep =
            emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
        SclrGep->setName("next.gep");
        VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
      }
    }
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
///       Non-zero divisors that are non-compile-time constants will not be
///       converted into multiplication, so we will still end up scalarizing
///       the division, but can do so w/o predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Br:
  case Instruction::PHI:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::GetElementPtr: {
    // Construct a vector GEP by widening the operands of the scalar GEP as
    // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
    // results in a vector of pointers when at least one operand of the GEP
    // is vector-typed. Thus, to keep the representation compact, we only use
    // vector-typed operands for loop-varying values.
    auto *GEP = cast<GetElementPtrInst>(&I);

    if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
      // If we are vectorizing, but the GEP has only loop-invariant operands,
      // the GEP we build (by only using vector-typed operands for
      // loop-varying values) would be a scalar pointer. Thus, to ensure we
      // produce a vector of pointers, we need to either arbitrarily pick an
      // operand to broadcast, or broadcast a clone of the original GEP.
      // Here, we broadcast a clone of the original.
      //
      // TODO: If at some point we decide to scalarize instructions having
      //       loop-invariant operands, this special case will no longer be
      //       required. We would add the scalarization decision to
      //       collectLoopScalars() and teach getVectorValue() to broadcast
      //       the lane-zero scalar value.
      auto *Clone = Builder.Insert(GEP->clone());
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
        VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
        addMetadata(EntryPart, GEP);
      }
    } else {
      // If the GEP has at least one loop-varying operand, we are sure to
      // produce a vector of pointers. But if we are only unrolling, we want
      // to produce a scalar GEP for each unroll part. Thus, the GEP we
      // produce with the code below will be scalar (if VF == 1) or vector
      // (otherwise).
      // Note that for the unroll-only case, we still maintain values in the
      // vector mapping with initVector, as we do for other instructions.
      for (unsigned Part = 0; Part < UF; ++Part) {
        // The pointer operand of the new GEP. If it's loop-invariant, we
        // won't broadcast it.
        auto *Ptr =
            OrigLoop->isLoopInvariant(GEP->getPointerOperand())
                ? GEP->getPointerOperand()
                : getOrCreateVectorValue(GEP->getPointerOperand(), Part);

        // Collect all the indices for the new GEP. If any index is
        // loop-invariant, we won't broadcast it.
        SmallVector<Value *, 4> Indices;
        for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
          if (OrigLoop->isLoopInvariant(U.get()))
            Indices.push_back(U.get());
          else
            Indices.push_back(getOrCreateVectorValue(U.get(), Part));
        }

        // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
        // but it should be a vector, otherwise.
        auto *NewGEP = GEP->isInBounds()
                           ? Builder.CreateInBoundsGEP(Ptr, Indices)
                           : Builder.CreateGEP(Ptr, Indices);
        assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
               "NewGEP is not a pointer vector");
        VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
        addMetadata(NewGEP, GEP);
      }
    }

    break;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen binops.
    auto *BinOp = cast<BinaryOperator>(&I);
    setDebugLocFromInst(Builder, BinOp);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
      Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
      Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);

      if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
        VecOp->copyIRFlags(BinOp);

      // Use this vector value for all users of the original instruction.
      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, BinOp);
    }

    break;
  }
  case Instruction::Select: {
    // Widen selects.
    // If the selector is loop invariant, we can create a select
    // instruction with a scalar condition. Otherwise, use a vector-select.
    auto *SE = PSE.getSE();
    bool InvariantCond =
        SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
    setDebugLocFromInst(Builder, &I);

    // The condition can be loop invariant but still defined inside the
    // loop. This means that we can't just use the original 'cond' value.
    // We have to take the 'vectorized' value and pick the first lane.
    // InstCombine will make this a no-op.

    auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
      Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
      Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
      Value *Sel =
          Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
      VectorLoopValueMap.setVectorValue(&I, Part, Sel);
      addMetadata(Sel, &I);
    }

    break;
  }

  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = dyn_cast<CmpInst>(&I);
    setDebugLocFromInst(Builder, Cmp);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
      Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        Builder.setFastMathFlags(Cmp->getFastMathFlags());
        C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
      }
      VectorLoopValueMap.setVectorValue(&I, Part, C);
      addMetadata(C, &I);
    }

    break;
  }

  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto *CI = dyn_cast<CastInst>(&I);
    setDebugLocFromInst(Builder, CI);

    // Vectorize casts.
    Type *DestTy =
        (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
      Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
      VectorLoopValueMap.setVectorValue(&I, Part, Cast);
      addMetadata(Cast, &I);
    }
    break;
  }

  case Instruction::Call: {
    // Ignore dbg intrinsics.
    if (isa<DbgInfoIntrinsic>(I))
      break;
    setDebugLocFromInst(Builder, &I);

    Module *M = I.getParent()->getParent()->getParent();
    auto *CI = cast<CallInst>(&I);

    StringRef FnName = CI->getCalledFunction()->getName();
    Function *F = CI->getCalledFunction();
    Type *RetTy = ToVectorTy(CI->getType(), VF);
    SmallVector<Type *, 4> Tys;
    for (Value *ArgOperand : CI->arg_operands())
      Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // The flag shows whether we use an intrinsic or a plain call for the
    // vectorized version of the instruction, i.e., whether it is beneficial
    // to use an intrinsic call rather than a library call.
    bool NeedToScalarize;
    unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
    bool UseVectorIntrinsic =
        ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
    assert((UseVectorIntrinsic || !NeedToScalarize) &&
           "Instruction should be scalarized elsewhere.");

    for (unsigned Part = 0; Part < UF; ++Part) {
      SmallVector<Value *, 4> Args;
      for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
        Value *Arg = CI->getArgOperand(i);
        // Some intrinsics have a scalar argument - don't replace it with a
        // vector.
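        // (For example - illustrative - the i32 exponent of llvm.powi stays
        // scalar even when the base operand is widened to a vector.)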
        if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
          Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
        Args.push_back(Arg);
      }

      Function *VectorF;
      if (UseVectorIntrinsic) {
        // Use the vector version of the intrinsic.
        Type *TysForDecl[] = {CI->getType()};
        if (VF > 1)
          TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
        VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      } else {
        // Use the vector version of the library call.
        StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
        assert(!VFnName.empty() && "Vector function name is empty.");
        VectorF = M->getFunction(VFnName);
        if (!VectorF) {
          // Generate a declaration.
          FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
          VectorF =
              Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
          VectorF->copyAttributesFrom(F);
        }
      }
      assert(VectorF && "Can't create vector function.");

      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

      if (isa<FPMathOperator>(V))
        V->copyFastMathFlags(CI);

      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, &I);
    }

    break;
  }

  default:
    // This instruction is not vectorized by simple widening.
    LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // DT is not kept up-to-date for outer loop vectorization.
  if (EnableVPlanNativePath)
    return;

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  DT->addNewBlock(LoopMiddleBlock,
                  LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
}

void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
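  // For example (illustrative): in a scalarized store 'a[b[i]] = x', the use
  // of the address computation by the store is a scalar use, whereas if the
  // same store were executed as a scatter, the pointer operand would be
  // needed as a vector of pointers.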
4188 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4189 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4190 assert(WideningDecision != CM_Unknown && 4191 "Widening decision should be ready at this moment"); 4192 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4193 if (Ptr == Store->getValueOperand()) 4194 return WideningDecision == CM_Scalarize; 4195 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4196 "Ptr is neither a value nor a pointer operand"); 4197 return WideningDecision != CM_GatherScatter; 4198 }; 4199 4200 // A helper that returns true if the given value is a bitcast or 4201 // getelementptr instruction contained in the loop. 4202 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4203 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4204 isa<GetElementPtrInst>(V)) && 4205 !TheLoop->isLoopInvariant(V); 4206 }; 4207 4208 // A helper that evaluates a memory access's use of a pointer. If the use 4209 // will be a scalar use, and the pointer is only used by memory accesses, we 4210 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4211 // PossibleNonScalarPtrs. 4212 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4213 // We only care about bitcast and getelementptr instructions contained in 4214 // the loop. 4215 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4216 return; 4217 4218 // If the pointer has already been identified as scalar (e.g., if it was 4219 // also identified as uniform), there's nothing to do. 4220 auto *I = cast<Instruction>(Ptr); 4221 if (Worklist.count(I)) 4222 return; 4223 4224 // If the use of the pointer will be a scalar use, and all users of the 4225 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4226 // place the pointer in PossibleNonScalarPtrs. 4227 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4228 return isa<LoadInst>(U) || isa<StoreInst>(U); 4229 })) 4230 ScalarPtrs.insert(I); 4231 else 4232 PossibleNonScalarPtrs.insert(I); 4233 }; 4234 4235 // We seed the scalars analysis with three classes of instructions: (1) 4236 // instructions marked uniform-after-vectorization, (2) bitcast and 4237 // getelementptr instructions used by memory accesses requiring a scalar use, 4238 // and (3) pointer induction variables and their update instructions (we 4239 // currently only scalarize these). 4240 // 4241 // (1) Add to the worklist all instructions that have been identified as 4242 // uniform-after-vectorization. 4243 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4244 4245 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4246 // memory accesses requiring a scalar use. The pointer operands of loads and 4247 // stores will be scalar as long as the memory access is not a gather or 4248 // scatter operation. The value operand of a store will remain scalar if the 4249 // store is scalarized.
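// A hypothetical example of the pattern scanned for below:
//   %gep = getelementptr inbounds i32, i32* %base, i64 %iv
//   store i32 %val, i32* %gep
// If the store is scalarized or widened consecutively, the use of %gep is a
// scalar use; %gep then lands in ScalarPtrs provided all of its users are
// loads or stores, and in PossibleNonScalarPtrs otherwise (e.g. if the access
// becomes a scatter).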
4250 for (auto *BB : TheLoop->blocks()) 4251 for (auto &I : *BB) { 4252 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4253 evaluatePtrUse(Load, Load->getPointerOperand()); 4254 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4255 evaluatePtrUse(Store, Store->getPointerOperand()); 4256 evaluatePtrUse(Store, Store->getValueOperand()); 4257 } 4258 } 4259 for (auto *I : ScalarPtrs) 4260 if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) { 4261 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4262 Worklist.insert(I); 4263 } 4264 4265 // (3) Add to the worklist all pointer induction variables and their update 4266 // instructions. 4267 // 4268 // TODO: Once we are able to vectorize pointer induction variables we should 4269 // no longer insert them into the worklist here. 4270 auto *Latch = TheLoop->getLoopLatch(); 4271 for (auto &Induction : *Legal->getInductionVars()) { 4272 auto *Ind = Induction.first; 4273 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4274 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4275 continue; 4276 Worklist.insert(Ind); 4277 Worklist.insert(IndUpdate); 4278 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4279 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4280 << "\n"); 4281 } 4282 4283 // Insert the forced scalars. 4284 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4285 // induction variable when the PHI user is scalarized. 4286 auto ForcedScalar = ForcedScalars.find(VF); 4287 if (ForcedScalar != ForcedScalars.end()) 4288 for (auto *I : ForcedScalar->second) 4289 Worklist.insert(I); 4290 4291 // Expand the worklist by looking through any bitcasts and getelementptr 4292 // instructions we've already identified as scalar. This is similar to the 4293 // expansion step in collectLoopUniforms(); however, here we're only 4294 // expanding to include additional bitcasts and getelementptr instructions. 4295 unsigned Idx = 0; 4296 while (Idx != Worklist.size()) { 4297 Instruction *Dst = Worklist[Idx++]; 4298 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4299 continue; 4300 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4301 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4302 auto *J = cast<Instruction>(U); 4303 return !TheLoop->contains(J) || Worklist.count(J) || 4304 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4305 isScalarUse(J, Src)); 4306 })) { 4307 Worklist.insert(Src); 4308 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4309 } 4310 } 4311 4312 // An induction variable will remain scalar if all users of the induction 4313 // variable and induction variable update remain scalar. 4314 for (auto &Induction : *Legal->getInductionVars()) { 4315 auto *Ind = Induction.first; 4316 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4317 4318 // We already considered pointer induction variables, so there's no reason 4319 // to look at their users again. 4320 // 4321 // TODO: Once we are able to vectorize pointer induction variables we 4322 // should no longer skip over them here. 4323 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4324 continue; 4325 4326 // Determine if all users of the induction variable are scalar after 4327 // vectorization. 
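// (For instance, an integer IV whose only in-loop users are its own update
// and GEPs already placed in Worklist stays scalar; a single use by a widened
// arithmetic instruction is enough to keep the vector IV around.)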
4328 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4329 auto *I = cast<Instruction>(U); 4330 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4331 }); 4332 if (!ScalarInd) 4333 continue; 4334 4335 // Determine if all users of the induction variable update instruction are 4336 // scalar after vectorization. 4337 auto ScalarIndUpdate = 4338 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4339 auto *I = cast<Instruction>(U); 4340 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4341 }); 4342 if (!ScalarIndUpdate) 4343 continue; 4344 4345 // The induction variable and its update instruction will remain scalar. 4346 Worklist.insert(Ind); 4347 Worklist.insert(IndUpdate); 4348 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4349 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4350 << "\n"); 4351 } 4352 4353 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4354 } 4355 4356 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4357 if (!blockNeedsPredication(I->getParent())) 4358 return false; 4359 switch(I->getOpcode()) { 4360 default: 4361 break; 4362 case Instruction::Load: 4363 case Instruction::Store: { 4364 if (!Legal->isMaskRequired(I)) 4365 return false; 4366 auto *Ptr = getLoadStorePointerOperand(I); 4367 auto *Ty = getMemInstValueType(I); 4368 // We have already decided how to vectorize this instruction, get that 4369 // result. 4370 if (VF > 1) { 4371 InstWidening WideningDecision = getWideningDecision(I, VF); 4372 assert(WideningDecision != CM_Unknown && 4373 "Widening decision should be ready at this moment"); 4374 return WideningDecision == CM_Scalarize; 4375 } 4376 return isa<LoadInst>(I) ? 4377 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 4378 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 4379 } 4380 case Instruction::UDiv: 4381 case Instruction::SDiv: 4382 case Instruction::SRem: 4383 case Instruction::URem: 4384 return mayDivideByZero(*I); 4385 } 4386 return false; 4387 } 4388 4389 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4390 unsigned VF) { 4391 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4392 assert(getWideningDecision(I, VF) == CM_Unknown && 4393 "Decision should not be set yet."); 4394 auto *Group = getInterleavedAccessGroup(I); 4395 assert(Group && "Must have a group."); 4396 4397 // Check if masking is required. 4398 // A Group may need masking for one of two reasons: it resides in a block that 4399 // needs predication, or it was decided to use masking to deal with gaps. 4400 bool PredicatedAccessRequiresMasking = 4401 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4402 bool AccessWithGapsRequiresMasking = 4403 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 4404 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4405 return true; 4406 4407 // If masked interleaving is required, we expect that the user/target had 4408 // enabled it, because otherwise it either wouldn't have been created or 4409 // it should have been invalidated by the CostModel. 4410 assert(useMaskedInterleavedAccesses(TTI) && 4411 "Masked interleave-groups for predicated accesses are not enabled."); 4412 4413 auto *Ty = getMemInstValueType(I); 4414 return isa<LoadInst>(I) ? 
TTI.isLegalMaskedLoad(Ty) 4415 : TTI.isLegalMaskedStore(Ty); 4416 } 4417 4418 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4419 unsigned VF) { 4420 // Get and ensure we have a valid memory instruction. 4421 LoadInst *LI = dyn_cast<LoadInst>(I); 4422 StoreInst *SI = dyn_cast<StoreInst>(I); 4423 assert((LI || SI) && "Invalid memory instruction"); 4424 4425 auto *Ptr = getLoadStorePointerOperand(I); 4426 4427 // In order to be widened, the pointer should be consecutive, first of all. 4428 if (!Legal->isConsecutivePtr(Ptr)) 4429 return false; 4430 4431 // If the instruction is a store located in a predicated block, it will be 4432 // scalarized. 4433 if (isScalarWithPredication(I)) 4434 return false; 4435 4436 // If the instruction's allocated size doesn't equal it's type size, it 4437 // requires padding and will be scalarized. 4438 auto &DL = I->getModule()->getDataLayout(); 4439 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4440 if (hasIrregularType(ScalarTy, DL, VF)) 4441 return false; 4442 4443 return true; 4444 } 4445 4446 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4447 // We should not collect Uniforms more than once per VF. Right now, 4448 // this function is called from collectUniformsAndScalars(), which 4449 // already does this check. Collecting Uniforms for VF=1 does not make any 4450 // sense. 4451 4452 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4453 "This function should not be visited twice for the same VF"); 4454 4455 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4456 // not analyze again. Uniforms.count(VF) will return 1. 4457 Uniforms[VF].clear(); 4458 4459 // We now know that the loop is vectorizable! 4460 // Collect instructions inside the loop that will remain uniform after 4461 // vectorization. 4462 4463 // Global values, params and instructions outside of current loop are out of 4464 // scope. 4465 auto isOutOfScope = [&](Value *V) -> bool { 4466 Instruction *I = dyn_cast<Instruction>(V); 4467 return (!I || !TheLoop->contains(I)); 4468 }; 4469 4470 SetVector<Instruction *> Worklist; 4471 BasicBlock *Latch = TheLoop->getLoopLatch(); 4472 4473 // Start with the conditional branch. If the branch condition is an 4474 // instruction contained in the loop that is only used by the branch, it is 4475 // uniform. 4476 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4477 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 4478 Worklist.insert(Cmp); 4479 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 4480 } 4481 4482 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4483 // are pointers that are treated like consecutive pointers during 4484 // vectorization. The pointer operands of interleaved accesses are an 4485 // example. 4486 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4487 4488 // Holds pointer operands of instructions that are possibly non-uniform. 
4489 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4490 4491 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4492 InstWidening WideningDecision = getWideningDecision(I, VF); 4493 assert(WideningDecision != CM_Unknown && 4494 "Widening decision should be ready at this moment"); 4495 4496 return (WideningDecision == CM_Widen || 4497 WideningDecision == CM_Widen_Reverse || 4498 WideningDecision == CM_Interleave); 4499 }; 4500 // Iterate over the instructions in the loop, and collect all 4501 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4502 // that a consecutive-like pointer operand will be scalarized, we collect it 4503 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4504 // getelementptr instruction can be used by both vectorized and scalarized 4505 // memory instructions. For example, if a loop loads and stores from the same 4506 // location, but the store is conditional, the store will be scalarized, and 4507 // the getelementptr won't remain uniform. 4508 for (auto *BB : TheLoop->blocks()) 4509 for (auto &I : *BB) { 4510 // If there's no pointer operand, there's nothing to do. 4511 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4512 if (!Ptr) 4513 continue; 4514 4515 // True if all users of Ptr are memory accesses that have Ptr as their 4516 // pointer operand. 4517 auto UsersAreMemAccesses = 4518 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4519 return getLoadStorePointerOperand(U) == Ptr; 4520 }); 4521 4522 // Ensure the memory instruction will not be scalarized or used by 4523 // gather/scatter, making its pointer operand non-uniform. If the pointer 4524 // operand is used by any instruction other than a memory access, we 4525 // conservatively assume the pointer operand may be non-uniform. 4526 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4527 PossibleNonUniformPtrs.insert(Ptr); 4528 4529 // If the memory instruction will be vectorized and its pointer operand 4530 // is consecutive-like, or interleaving - the pointer operand should 4531 // remain uniform. 4532 else 4533 ConsecutiveLikePtrs.insert(Ptr); 4534 } 4535 4536 // Add to the Worklist all consecutive and consecutive-like pointers that 4537 // aren't also identified as possibly non-uniform. 4538 for (auto *V : ConsecutiveLikePtrs) 4539 if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) { 4540 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 4541 Worklist.insert(V); 4542 } 4543 4544 // Expand Worklist in topological order: whenever a new instruction 4545 // is added , its users should be already inside Worklist. It ensures 4546 // a uniform instruction will only be used by uniform instructions. 4547 unsigned idx = 0; 4548 while (idx != Worklist.size()) { 4549 Instruction *I = Worklist[idx++]; 4550 4551 for (auto OV : I->operand_values()) { 4552 // isOutOfScope operands cannot be uniform instructions. 4553 if (isOutOfScope(OV)) 4554 continue; 4555 // First order recurrence Phi's should typically be considered 4556 // non-uniform. 4557 auto *OP = dyn_cast<PHINode>(OV); 4558 if (OP && Legal->isFirstOrderRecurrence(OP)) 4559 continue; 4560 // If all the users of the operand are uniform, then add the 4561 // operand into the uniform worklist. 
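// (The common case: a getelementptr whose only users are consecutive loads
// or stores that will be widened needs just its lane-zero address, so it can
// be treated as uniform here.)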
4562 auto *OI = cast<Instruction>(OV); 4563 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4564 auto *J = cast<Instruction>(U); 4565 return Worklist.count(J) || 4566 (OI == getLoadStorePointerOperand(J) && 4567 isUniformDecision(J, VF)); 4568 })) { 4569 Worklist.insert(OI); 4570 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4571 } 4572 } 4573 } 4574 4575 // Returns true if Ptr is the pointer operand of a memory access instruction 4576 // I, and I is known to not require scalarization. 4577 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4578 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4579 }; 4580 4581 // For an instruction to be added into Worklist above, all its users inside 4582 // the loop should also be in Worklist. However, this condition cannot be 4583 // true for phi nodes that form a cyclic dependence. We must process phi 4584 // nodes separately. An induction variable will remain uniform if all users 4585 // of the induction variable and induction variable update remain uniform. 4586 // The code below handles both pointer and non-pointer induction variables. 4587 for (auto &Induction : *Legal->getInductionVars()) { 4588 auto *Ind = Induction.first; 4589 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4590 4591 // Determine if all users of the induction variable are uniform after 4592 // vectorization. 4593 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4594 auto *I = cast<Instruction>(U); 4595 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4596 isVectorizedMemAccessUse(I, Ind); 4597 }); 4598 if (!UniformInd) 4599 continue; 4600 4601 // Determine if all users of the induction variable update instruction are 4602 // uniform after vectorization. 4603 auto UniformIndUpdate = 4604 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4605 auto *I = cast<Instruction>(U); 4606 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4607 isVectorizedMemAccessUse(I, IndUpdate); 4608 }); 4609 if (!UniformIndUpdate) 4610 continue; 4611 4612 // The induction variable and its update instruction will remain uniform. 4613 Worklist.insert(Ind); 4614 Worklist.insert(IndUpdate); 4615 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 4616 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate 4617 << "\n"); 4618 } 4619 4620 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4621 } 4622 4623 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { 4624 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4625 // TODO: It may be useful to do this, since it's still likely to be 4626 // dynamically uniform if the target can skip. 4627 LLVM_DEBUG( 4628 dbgs() << "LV: Not inserting runtime ptr check for divergent target\n"); 4629 4630 ORE->emit( 4631 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 4632 << "runtime pointer checks needed. Not enabled for divergent target"); 4633 4634 return None; 4635 } 4636 4637 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4638 if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize. 4639 return computeFeasibleMaxVF(OptForSize, TC); 4640 4641 if (Legal->getRuntimePointerChecking()->Need) { 4642 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4643 << "runtime pointer checks needed.
Enable vectorization of this " 4644 "loop with '#pragma clang loop vectorize(enable)' when " 4645 "compiling with -Os/-Oz"); 4646 LLVM_DEBUG( 4647 dbgs() 4648 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 4649 return None; 4650 } 4651 4652 if (!PSE.getUnionPredicate().getPredicates().empty()) { 4653 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4654 << "runtime SCEV checks needed. Enable vectorization of this " 4655 "loop with '#pragma clang loop vectorize(enable)' when " 4656 "compiling with -Os/-Oz"); 4657 LLVM_DEBUG( 4658 dbgs() 4659 << "LV: Aborting. Runtime SCEV check is required with -Os/-Oz.\n"); 4660 return None; 4661 } 4662 4663 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4664 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4665 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4666 << "runtime stride == 1 checks needed. Enable vectorization of " 4667 "this loop with '#pragma clang loop vectorize(enable)' when " 4668 "compiling with -Os/-Oz"); 4669 LLVM_DEBUG( 4670 dbgs() 4671 << "LV: Aborting. Runtime stride check is required with -Os/-Oz.\n"); 4672 return None; 4673 } 4674 4675 // If we optimize the program for size, avoid creating the tail loop. 4676 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4677 4678 if (TC == 1) { 4679 ORE->emit(createMissedAnalysis("SingleIterationLoop") 4680 << "loop trip count is one, irrelevant for vectorization"); 4681 LLVM_DEBUG(dbgs() << "LV: Aborting, single iteration (non) loop.\n"); 4682 return None; 4683 } 4684 4685 // Record that scalar epilogue is not allowed. 4686 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 4687 4688 IsScalarEpilogueAllowed = !OptForSize; 4689 4690 // We don't create an epilogue when optimizing for size. 4691 // Invalidate interleave groups that require an epilogue if we can't mask 4692 // the interleave-group. 4693 if (!useMaskedInterleavedAccesses(TTI)) 4694 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 4695 4696 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 4697 4698 if (TC > 0 && TC % MaxVF == 0) { 4699 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 4700 return MaxVF; 4701 } 4702 4703 // If we don't know the precise trip count, or if the trip count that we 4704 // found modulo the vectorization factor is not zero, try to fold the tail 4705 // by masking. 4706 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 4707 if (Legal->canFoldTailByMasking()) { 4708 FoldTailByMasking = true; 4709 return MaxVF; 4710 } 4711 4712 if (TC == 0) { 4713 ORE->emit( 4714 createMissedAnalysis("UnknownLoopCountComplexCFG") 4715 << "unable to calculate the loop count due to complex control flow"); 4716 return None; 4717 } 4718 4719 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 4720 << "cannot optimize for size and vectorize at the same time. " 4721 "Enable vectorization of this loop with '#pragma clang loop " 4722 "vectorize(enable)' when compiling with -Os/-Oz"); 4723 return None; 4724 } 4725 4726 unsigned 4727 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 4728 unsigned ConstTripCount) { 4729 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4730 unsigned SmallestType, WidestType; 4731 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4732 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 4733 4734 // Get the maximum safe dependence distance in bits computed by LAA. 
4735 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4736 // the memory access that is most restrictive (involved in the smallest 4737 // dependence distance). 4738 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 4739 4740 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 4741 4742 unsigned MaxVectorSize = WidestRegister / WidestType; 4743 4744 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4745 << " / " << WidestType << " bits.\n"); 4746 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 4747 << WidestRegister << " bits.\n"); 4748 4749 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 4750 " into one vector!"); 4751 if (MaxVectorSize == 0) { 4752 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 4753 MaxVectorSize = 1; 4754 return MaxVectorSize; 4755 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 4756 isPowerOf2_32(ConstTripCount)) { 4757 // We need to clamp the VF to be the ConstTripCount. There is no point in 4758 // choosing a higher viable VF as done in the loop below. 4759 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 4760 << ConstTripCount << "\n"); 4761 MaxVectorSize = ConstTripCount; 4762 return MaxVectorSize; 4763 } 4764 4765 unsigned MaxVF = MaxVectorSize; 4766 if (TTI.shouldMaximizeVectorBandwidth(OptForSize) || 4767 (MaximizeBandwidth && !OptForSize)) { 4768 // Collect all viable vectorization factors larger than the default MaxVF 4769 // (i.e. MaxVectorSize). 4770 SmallVector<unsigned, 8> VFs; 4771 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 4772 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 4773 VFs.push_back(VS); 4774 4775 // For each VF calculate its register usage. 4776 auto RUs = calculateRegisterUsage(VFs); 4777 4778 // Select the largest VF which doesn't require more registers than existing 4779 // ones. 4780 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 4781 for (int i = RUs.size() - 1; i >= 0; --i) { 4782 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 4783 MaxVF = VFs[i]; 4784 break; 4785 } 4786 } 4787 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 4788 if (MaxVF < MinVF) { 4789 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 4790 << ") with target's minimum: " << MinVF << '\n'); 4791 MaxVF = MinVF; 4792 } 4793 } 4794 } 4795 return MaxVF; 4796 } 4797 4798 VectorizationFactor 4799 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 4800 float Cost = expectedCost(1).first; 4801 const float ScalarCost = Cost; 4802 unsigned Width = 1; 4803 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 4804 4805 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 4806 if (ForceVectorization && MaxVF > 1) { 4807 // Ignore scalar width, because the user explicitly wants vectorization. 4808 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 4809 // evaluation. 4810 Cost = std::numeric_limits<float>::max(); 4811 } 4812 4813 for (unsigned i = 2; i <= MaxVF; i *= 2) { 4814 // Notice that the vector loop needs to be executed fewer times, so 4815 // we need to divide the cost of the vector loop by the width of 4816 // the vector elements.
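// Worked example with made-up numbers: if the scalar loop costs 8 per
// iteration and the VF=4 body costs 20, the per-lane cost is 20 / 4 = 5 < 8,
// so VF=4 is preferred over the scalar loop unless a wider VF turns out to be
// cheaper still.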
4817 VectorizationCostTy C = expectedCost(i); 4818 float VectorCost = C.first / (float)i; 4819 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 4820 << " costs: " << (int)VectorCost << ".\n"); 4821 if (!C.second && !ForceVectorization) { 4822 LLVM_DEBUG( 4823 dbgs() << "LV: Not considering vector loop of width " << i 4824 << " because it will not generate any vector instructions.\n"); 4825 continue; 4826 } 4827 if (VectorCost < Cost) { 4828 Cost = VectorCost; 4829 Width = i; 4830 } 4831 } 4832 4833 if (!EnableCondStoresVectorization && NumPredStores) { 4834 ORE->emit(createMissedAnalysis("ConditionalStore") 4835 << "store that is conditionally executed prevents vectorization"); 4836 LLVM_DEBUG( 4837 dbgs() << "LV: No vectorization. There are conditional stores.\n"); 4838 Width = 1; 4839 Cost = ScalarCost; 4840 } 4841 4842 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 4843 << "LV: Vectorization seems to be not beneficial, " 4844 << "but was forced by a user.\n"); 4845 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 4846 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 4847 return Factor; 4848 } 4849 4850 std::pair<unsigned, unsigned> 4851 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 4852 unsigned MinWidth = -1U; 4853 unsigned MaxWidth = 8; 4854 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 4855 4856 // For each block. 4857 for (BasicBlock *BB : TheLoop->blocks()) { 4858 // For each instruction in the loop. 4859 for (Instruction &I : BB->instructionsWithoutDebug()) { 4860 Type *T = I.getType(); 4861 4862 // Skip ignored values. 4863 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end()) 4864 continue; 4865 4866 // Only examine Loads, Stores and PHINodes. 4867 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 4868 continue; 4869 4870 // Examine PHI nodes that are reduction variables. Update the type to 4871 // account for the recurrence type. 4872 if (auto *PN = dyn_cast<PHINode>(&I)) { 4873 if (!Legal->isReductionVariable(PN)) 4874 continue; 4875 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 4876 T = RdxDesc.getRecurrenceType(); 4877 } 4878 4879 // Examine the stored values. 4880 if (auto *ST = dyn_cast<StoreInst>(&I)) 4881 T = ST->getValueOperand()->getType(); 4882 4883 // Ignore loaded pointer types and stored pointer types that are not 4884 // vectorizable. 4885 // 4886 // FIXME: The check here attempts to predict whether a load or store will 4887 // be vectorized. We only know this for certain after a VF has 4888 // been selected. Here, we assume that if an access can be 4889 // vectorized, it will be. We should also look at extending this 4890 // optimization to non-pointer types. 4891 // 4892 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 4893 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 4894 continue; 4895 4896 MinWidth = std::min(MinWidth, 4897 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4898 MaxWidth = std::max(MaxWidth, 4899 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4900 } 4901 } 4902 4903 return {MinWidth, MaxWidth}; 4904 } 4905 4906 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 4907 unsigned VF, 4908 unsigned LoopCost) { 4909 // -- The interleave heuristics -- 4910 // We interleave the loop in order to expose ILP and reduce the loop overhead. 4911 // There are many micro-architectural considerations that we can't predict 4912 // at this level. 
For example, frontend pressure (on decode or fetch) due to 4913 // code size, or the number and capabilities of the execution ports. 4914 // 4915 // We use the following heuristics to select the interleave count: 4916 // 1. If the code has reductions, then we interleave to break the cross 4917 // iteration dependency. 4918 // 2. If the loop is really small, then we interleave to reduce the loop 4919 // overhead. 4920 // 3. We don't interleave if we think that we will spill registers to memory 4921 // due to the increased register pressure. 4922 4923 // When we optimize for size, we don't interleave. 4924 if (OptForSize) 4925 return 1; 4926 4927 // We used the distance for the interleave count. 4928 if (Legal->getMaxSafeDepDistBytes() != -1U) 4929 return 1; 4930 4931 // Do not interleave loops with a relatively small trip count. 4932 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4933 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 4934 return 1; 4935 4936 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 4937 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 4938 << " registers\n"); 4939 4940 if (VF == 1) { 4941 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 4942 TargetNumRegisters = ForceTargetNumScalarRegs; 4943 } else { 4944 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 4945 TargetNumRegisters = ForceTargetNumVectorRegs; 4946 } 4947 4948 RegisterUsage R = calculateRegisterUsage({VF})[0]; 4949 // We divide by these constants so assume that we have at least one 4950 // instruction that uses at least one register. 4951 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 4952 4953 // We calculate the interleave count using the following formula. 4954 // Subtract the number of loop invariants from the number of available 4955 // registers. These registers are used by all of the interleaved instances. 4956 // Next, divide the remaining registers by the number of registers that is 4957 // required by the loop, in order to estimate how many parallel instances 4958 // fit without causing spills. All of this is rounded down if necessary to be 4959 // a power of two. We want power of two interleave count to simplify any 4960 // addressing operations or alignment considerations. 4961 // We also want power of two interleave counts to ensure that the induction 4962 // variable of the vector loop wraps to zero, when tail is folded by masking; 4963 // this currently happens when OptForSize, in which case IC is set to 1 above. 4964 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 4965 R.MaxLocalUsers); 4966 4967 // Don't count the induction variable as interleaved. 4968 if (EnableIndVarRegisterHeur) 4969 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 4970 std::max(1U, (R.MaxLocalUsers - 1))); 4971 4972 // Clamp the interleave ranges to reasonable counts. 4973 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 4974 4975 // Check if the user has overridden the max. 4976 if (VF == 1) { 4977 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 4978 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 4979 } else { 4980 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 4981 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 4982 } 4983 4984 // If we did not calculate the cost for VF (because the user selected the VF) 4985 // then we calculate the cost of VF here. 
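// (Illustration of the interleave-count formula above, with made-up numbers:
// 16 target registers, 3 loop-invariant values and at most 5 values live at
// once give IC = PowerOf2Floor((16 - 3) / 5) = 2 before the clamping below.)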
4986 if (LoopCost == 0) 4987 LoopCost = expectedCost(VF).first; 4988 4989 // Clamp the calculated IC to be between 1 and the max interleave count 4990 // that the target allows. 4991 if (IC > MaxInterleaveCount) 4992 IC = MaxInterleaveCount; 4993 else if (IC < 1) 4994 IC = 1; 4995 4996 // Interleave if we vectorized this loop and there is a reduction that could 4997 // benefit from interleaving. 4998 if (VF > 1 && !Legal->getReductionVars()->empty()) { 4999 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5000 return IC; 5001 } 5002 5003 // Note that if we've already vectorized the loop we will have done the 5004 // runtime check and so interleaving won't require further checks. 5005 bool InterleavingRequiresRuntimePointerCheck = 5006 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5007 5008 // We want to interleave small loops in order to reduce the loop overhead and 5009 // potentially expose ILP opportunities. 5010 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5011 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5012 // We assume that the cost overhead is 1 and we use the cost model 5013 // to estimate the cost of the loop and interleave until the cost of the 5014 // loop overhead is about 5% of the cost of the loop. 5015 unsigned SmallIC = 5016 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5017 5018 // Interleave until store/load ports (estimated by max interleave count) are 5019 // saturated. 5020 unsigned NumStores = Legal->getNumStores(); 5021 unsigned NumLoads = Legal->getNumLoads(); 5022 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5023 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5024 5025 // If we have a scalar reduction (vector reductions are already dealt with 5026 // by this point), we can increase the critical path length if the loop 5027 // we're interleaving is inside another loop. Limit, by default to 2, so the 5028 // critical path only gets increased by one reduction operation. 5029 if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) { 5030 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5031 SmallIC = std::min(SmallIC, F); 5032 StoresIC = std::min(StoresIC, F); 5033 LoadsIC = std::min(LoadsIC, F); 5034 } 5035 5036 if (EnableLoadStoreRuntimeInterleave && 5037 std::max(StoresIC, LoadsIC) > SmallIC) { 5038 LLVM_DEBUG( 5039 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5040 return std::max(StoresIC, LoadsIC); 5041 } 5042 5043 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5044 return SmallIC; 5045 } 5046 5047 // Interleave if this is a large loop (small loops are already dealt with by 5048 // this point) that could benefit from interleaving. 5049 bool HasReductions = !Legal->getReductionVars()->empty(); 5050 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5051 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5052 return IC; 5053 } 5054 5055 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5056 return 1; 5057 } 5058 5059 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5060 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5061 // This function calculates the register usage by measuring the highest number 5062 // of values that are alive at a single location. Obviously, this is a very 5063 // rough estimation. We scan the loop in a topological order in order to 5064 // assign a number to each instruction.
We use RPO to ensure that defs are 5065 // met before their users. We assume that each instruction that has in-loop 5066 // users starts an interval. We record every time that an in-loop value is 5067 // used, so we have a list of the first and last occurrences of each 5068 // instruction. Next, we transpose this data structure into a multi map that 5069 // holds the list of intervals that *end* at a specific location. This multi 5070 // map allows us to perform a linear search. We scan the instructions linearly 5071 // and record each time that a new interval starts, by placing it in a set. 5072 // If we find this value in the multi-map then we remove it from the set. 5073 // The max register usage is the maximum size of the set. 5074 // We also search for instructions that are defined outside the loop, but are 5075 // used inside the loop. We need this number separately from the max-interval 5076 // usage number because when we unroll, loop-invariant values do not take 5077 // more register. 5078 LoopBlocksDFS DFS(TheLoop); 5079 DFS.perform(LI); 5080 5081 RegisterUsage RU; 5082 5083 // Each 'key' in the map opens a new interval. The values 5084 // of the map are the index of the 'last seen' usage of the 5085 // instruction that is the key. 5086 using IntervalMap = DenseMap<Instruction *, unsigned>; 5087 5088 // Maps instruction to its index. 5089 SmallVector<Instruction *, 64> IdxToInstr; 5090 // Marks the end of each interval. 5091 IntervalMap EndPoint; 5092 // Saves the list of instruction indices that are used in the loop. 5093 SmallPtrSet<Instruction *, 8> Ends; 5094 // Saves the list of values that are used in the loop but are 5095 // defined outside the loop, such as arguments and constants. 5096 SmallPtrSet<Value *, 8> LoopInvariants; 5097 5098 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5099 for (Instruction &I : BB->instructionsWithoutDebug()) { 5100 IdxToInstr.push_back(&I); 5101 5102 // Save the end location of each USE. 5103 for (Value *U : I.operands()) { 5104 auto *Instr = dyn_cast<Instruction>(U); 5105 5106 // Ignore non-instruction values such as arguments, constants, etc. 5107 if (!Instr) 5108 continue; 5109 5110 // If this instruction is outside the loop then record it and continue. 5111 if (!TheLoop->contains(Instr)) { 5112 LoopInvariants.insert(Instr); 5113 continue; 5114 } 5115 5116 // Overwrite previous end points. 5117 EndPoint[Instr] = IdxToInstr.size(); 5118 Ends.insert(Instr); 5119 } 5120 } 5121 } 5122 5123 // Saves the list of intervals that end with the index in 'key'. 5124 using InstrList = SmallVector<Instruction *, 2>; 5125 DenseMap<unsigned, InstrList> TransposeEnds; 5126 5127 // Transpose the EndPoints to a list of values that end at each index. 5128 for (auto &Interval : EndPoint) 5129 TransposeEnds[Interval.second].push_back(Interval.first); 5130 5131 SmallPtrSet<Instruction *, 8> OpenIntervals; 5132 5133 // Get the size of the widest register. 5134 unsigned MaxSafeDepDist = -1U; 5135 if (Legal->getMaxSafeDepDistBytes() != -1U) 5136 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5137 unsigned WidestRegister = 5138 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5139 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5140 5141 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5142 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5143 5144 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5145 5146 // A lambda that gets the register usage for the given type and VF. 
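// (E.g., assuming 128-bit vector registers: an i32 value at VF = 4 occupies
// max(1, 4 * 32 / 128) = 1 register, while at VF = 16 it needs 4.)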
5147 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5148 if (Ty->isTokenTy()) 5149 return 0U; 5150 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5151 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5152 }; 5153 5154 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5155 Instruction *I = IdxToInstr[i]; 5156 5157 // Remove all of the instructions that end at this location. 5158 InstrList &List = TransposeEnds[i]; 5159 for (Instruction *ToRemove : List) 5160 OpenIntervals.erase(ToRemove); 5161 5162 // Ignore instructions that are never used within the loop. 5163 if (Ends.find(I) == Ends.end()) 5164 continue; 5165 5166 // Skip ignored values. 5167 if (ValuesToIgnore.find(I) != ValuesToIgnore.end()) 5168 continue; 5169 5170 // For each VF find the maximum usage of registers. 5171 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5172 if (VFs[j] == 1) { 5173 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5174 continue; 5175 } 5176 collectUniformsAndScalars(VFs[j]); 5177 // Count the number of live intervals. 5178 unsigned RegUsage = 0; 5179 for (auto Inst : OpenIntervals) { 5180 // Skip ignored values for VF > 1. 5181 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() || 5182 isScalarAfterVectorization(Inst, VFs[j])) 5183 continue; 5184 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5185 } 5186 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5187 } 5188 5189 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5190 << OpenIntervals.size() << '\n'); 5191 5192 // Add the current instruction to the list of open intervals. 5193 OpenIntervals.insert(I); 5194 } 5195 5196 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5197 unsigned Invariant = 0; 5198 if (VFs[i] == 1) 5199 Invariant = LoopInvariants.size(); 5200 else { 5201 for (auto Inst : LoopInvariants) 5202 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5203 } 5204 5205 LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5206 LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5207 LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant 5208 << '\n'); 5209 5210 RU.LoopInvariantRegs = Invariant; 5211 RU.MaxLocalUsers = MaxUsages[i]; 5212 RUs[i] = RU; 5213 } 5214 5215 return RUs; 5216 } 5217 5218 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5219 // TODO: Cost model for emulated masked load/store is completely 5220 // broken. This hack guides the cost model to use an artificially 5221 // high enough value to practically disable vectorization with such 5222 // operations, except where previously deployed legality hack allowed 5223 // using very low cost values. This is to avoid regressions coming simply 5224 // from moving "masked load/store" check from legality to cost model. 5225 // Masked Load/Gather emulation was previously never allowed. 5226 // Limited number of Masked Store/Scatter emulation was allowed. 5227 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5228 return isa<LoadInst>(I) || 5229 (isa<StoreInst>(I) && 5230 NumPredStores > NumberOfStoresToPredicate); 5231 } 5232 5233 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5234 // If we aren't vectorizing the loop, or if we've already collected the 5235 // instructions to scalarize, there's nothing to do. Collection may already 5236 // have occurred if we have a user-selected VF and are now computing the 5237 // expected cost for interleaving. 
5238 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5239 return; 5240 5241 // Initialize a mapping for VF in InstsToScalarize. If we find that it's 5242 // not profitable to scalarize any instructions, the presence of VF in the 5243 // map will indicate that we've analyzed it already. 5244 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5245 5246 // Find all the instructions that are scalar with predication in the loop and 5247 // determine if it would be better to not if-convert the blocks they are in. 5248 // If so, we also record the instructions to scalarize. 5249 for (BasicBlock *BB : TheLoop->blocks()) { 5250 if (!blockNeedsPredication(BB)) 5251 continue; 5252 for (Instruction &I : *BB) 5253 if (isScalarWithPredication(&I)) { 5254 ScalarCostsTy ScalarCosts; 5255 // Do not apply discount logic if hacked cost is needed 5256 // for emulated masked memrefs. 5257 if (!useEmulatedMaskMemRefHack(&I) && 5258 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5259 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5260 // Remember that BB will remain after vectorization. 5261 PredicatedBBsAfterVectorization.insert(BB); 5262 } 5263 } 5264 } 5265 5266 int LoopVectorizationCostModel::computePredInstDiscount( 5267 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5268 unsigned VF) { 5269 assert(!isUniformAfterVectorization(PredInst, VF) && 5270 "Instruction marked uniform-after-vectorization will be predicated"); 5271 5272 // Initialize the discount to zero, meaning that the scalar version and the 5273 // vector version cost the same. 5274 int Discount = 0; 5275 5276 // Holds instructions to analyze. The instructions we visit are mapped in 5277 // ScalarCosts. Those instructions are the ones that would be scalarized if 5278 // we find that the scalar version costs less. 5279 SmallVector<Instruction *, 8> Worklist; 5280 5281 // Returns true if the given instruction can be scalarized. 5282 auto canBeScalarized = [&](Instruction *I) -> bool { 5283 // We only attempt to scalarize instructions forming a single-use chain 5284 // from the original predicated block that would otherwise be vectorized. 5285 // Although not strictly necessary, we give up on instructions we know will 5286 // already be scalar to avoid traversing chains that are unlikely to be 5287 // beneficial. 5288 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5289 isScalarAfterVectorization(I, VF)) 5290 return false; 5291 5292 // If the instruction is scalar with predication, it will be analyzed 5293 // separately. We ignore it within the context of PredInst. 5294 if (isScalarWithPredication(I)) 5295 return false; 5296 5297 // If any of the instruction's operands are uniform after vectorization, 5298 // the instruction cannot be scalarized. This prevents, for example, a 5299 // masked load from being scalarized. 5300 // 5301 // We assume we will only emit a value for lane zero of an instruction 5302 // marked uniform after vectorization, rather than VF identical values. 5303 // Thus, if we scalarize an instruction that uses a uniform, we would 5304 // create uses of values corresponding to the lanes we aren't emitting code 5305 // for. This behavior can be changed by allowing getScalarValue to clone 5306 // the lane zero values for uniforms rather than asserting.
5307 for (Use &U : I->operands()) 5308 if (auto *J = dyn_cast<Instruction>(U.get())) 5309 if (isUniformAfterVectorization(J, VF)) 5310 return false; 5311 5312 // Otherwise, we can scalarize the instruction. 5313 return true; 5314 }; 5315 5316 // Returns true if an operand that cannot be scalarized must be extracted 5317 // from a vector. We will account for this scalarization overhead below. Note 5318 // that the non-void predicated instructions are placed in their own blocks, 5319 // and their return values are inserted into vectors. Thus, an extract would 5320 // still be required. 5321 auto needsExtract = [&](Instruction *I) -> bool { 5322 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 5323 }; 5324 5325 // Compute the expected cost discount from scalarizing the entire expression 5326 // feeding the predicated instruction. We currently only consider expressions 5327 // that are single-use instruction chains. 5328 Worklist.push_back(PredInst); 5329 while (!Worklist.empty()) { 5330 Instruction *I = Worklist.pop_back_val(); 5331 5332 // If we've already analyzed the instruction, there's nothing to do. 5333 if (ScalarCosts.find(I) != ScalarCosts.end()) 5334 continue; 5335 5336 // Compute the cost of the vector instruction. Note that this cost already 5337 // includes the scalarization overhead of the predicated instruction. 5338 unsigned VectorCost = getInstructionCost(I, VF).first; 5339 5340 // Compute the cost of the scalarized instruction. This cost is the cost of 5341 // the instruction as if it wasn't if-converted and instead remained in the 5342 // predicated block. We will scale this cost by block probability after 5343 // computing the scalarization overhead. 5344 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5345 5346 // Compute the scalarization overhead of needed insertelement instructions 5347 // and phi nodes. 5348 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5349 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5350 true, false); 5351 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5352 } 5353 5354 // Compute the scalarization overhead of needed extractelement 5355 // instructions. For each of the instruction's operands, if the operand can 5356 // be scalarized, add it to the worklist; otherwise, account for the 5357 // overhead. 5358 for (Use &U : I->operands()) 5359 if (auto *J = dyn_cast<Instruction>(U.get())) { 5360 assert(VectorType::isValidElementType(J->getType()) && 5361 "Instruction has non-scalar type"); 5362 if (canBeScalarized(J)) 5363 Worklist.push_back(J); 5364 else if (needsExtract(J)) 5365 ScalarCost += TTI.getScalarizationOverhead( 5366 ToVectorTy(J->getType(),VF), false, true); 5367 } 5368 5369 // Scale the total scalar cost by block probability. 5370 ScalarCost /= getReciprocalPredBlockProb(); 5371 5372 // Compute the discount. A non-negative discount means the vector version 5373 // of the instruction costs more, and scalarizing would be beneficial. 5374 Discount += VectorCost - ScalarCost; 5375 ScalarCosts[I] = ScalarCost; 5376 } 5377 5378 return Discount; 5379 } 5380 5381 LoopVectorizationCostModel::VectorizationCostTy 5382 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5383 VectorizationCostTy Cost; 5384 5385 // For each block. 5386 for (BasicBlock *BB : TheLoop->blocks()) { 5387 VectorizationCostTy BlockCost; 5388 5389 // For each instruction in the old loop. 5390 for (Instruction &I : BB->instructionsWithoutDebug()) { 5391 // Skip ignored values. 
5392 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5393 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5394 continue; 5395 5396 VectorizationCostTy C = getInstructionCost(&I, VF); 5397 5398 // Check if we should override the cost. 5399 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5400 C.first = ForceTargetInstructionCost; 5401 5402 BlockCost.first += C.first; 5403 BlockCost.second |= C.second; 5404 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5405 << " for VF " << VF << " For instruction: " << I 5406 << '\n'); 5407 } 5408 5409 // If we are vectorizing a predicated block, it will have been 5410 // if-converted. This means that the block's instructions (aside from 5411 // stores and instructions that may divide by zero) will now be 5412 // unconditionally executed. For the scalar case, we may not always execute 5413 // the predicated block. Thus, scale the block's cost by the probability of 5414 // executing it. 5415 if (VF == 1 && blockNeedsPredication(BB)) 5416 BlockCost.first /= getReciprocalPredBlockProb(); 5417 5418 Cost.first += BlockCost.first; 5419 Cost.second |= BlockCost.second; 5420 } 5421 5422 return Cost; 5423 } 5424 5425 /// Gets Address Access SCEV after verifying that the access pattern 5426 /// is loop invariant except the induction variable dependence. 5427 /// 5428 /// This SCEV can be sent to the Target in order to estimate the address 5429 /// calculation cost. 5430 static const SCEV *getAddressAccessSCEV( 5431 Value *Ptr, 5432 LoopVectorizationLegality *Legal, 5433 PredicatedScalarEvolution &PSE, 5434 const Loop *TheLoop) { 5435 5436 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5437 if (!Gep) 5438 return nullptr; 5439 5440 // We are looking for a gep with all loop invariant indices except for one 5441 // which should be an induction variable. 5442 auto SE = PSE.getSE(); 5443 unsigned NumOperands = Gep->getNumOperands(); 5444 for (unsigned i = 1; i < NumOperands; ++i) { 5445 Value *Opd = Gep->getOperand(i); 5446 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5447 !Legal->isInductionVariable(Opd)) 5448 return nullptr; 5449 } 5450 5451 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5452 return PSE.getSCEV(Ptr); 5453 } 5454 5455 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5456 return Legal->hasStride(I->getOperand(0)) || 5457 Legal->hasStride(I->getOperand(1)); 5458 } 5459 5460 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5461 unsigned VF) { 5462 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5463 Type *ValTy = getMemInstValueType(I); 5464 auto SE = PSE.getSE(); 5465 5466 unsigned Alignment = getLoadStoreAlignment(I); 5467 unsigned AS = getLoadStoreAddressSpace(I); 5468 Value *Ptr = getLoadStorePointerOperand(I); 5469 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5470 5471 // Figure out whether the access is strided and get the stride value 5472 // if it's known in compile time 5473 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5474 5475 // Get the cost of the scalar memory instruction and address computation. 5476 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5477 5478 // Don't pass *I here, since it is scalar but will actually be part of a 5479 // vectorized loop where the user of it is a vectorized instruction. 
5480 Cost += VF * 5481 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 5482 AS); 5483 5484 // Get the overhead of the extractelement and insertelement instructions 5485 // we might create due to scalarization. 5486 Cost += getScalarizationOverhead(I, VF, TTI); 5487 5488 // If we have a predicated store, it may not be executed for each vector 5489 // lane. Scale the cost by the probability of executing the predicated 5490 // block. 5491 if (isPredicatedInst(I)) { 5492 Cost /= getReciprocalPredBlockProb(); 5493 5494 if (useEmulatedMaskMemRefHack(I)) 5495 // Artificially setting to a high enough value to practically disable 5496 // vectorization with such operations. 5497 Cost = 3000000; 5498 } 5499 5500 return Cost; 5501 } 5502 5503 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5504 unsigned VF) { 5505 Type *ValTy = getMemInstValueType(I); 5506 Type *VectorTy = ToVectorTy(ValTy, VF); 5507 unsigned Alignment = getLoadStoreAlignment(I); 5508 Value *Ptr = getLoadStorePointerOperand(I); 5509 unsigned AS = getLoadStoreAddressSpace(I); 5510 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5511 5512 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5513 "Stride should be 1 or -1 for consecutive memory access"); 5514 unsigned Cost = 0; 5515 if (Legal->isMaskRequired(I)) 5516 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 5517 else 5518 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5519 5520 bool Reverse = ConsecutiveStride < 0; 5521 if (Reverse) 5522 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5523 return Cost; 5524 } 5525 5526 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5527 unsigned VF) { 5528 Type *ValTy = getMemInstValueType(I); 5529 Type *VectorTy = ToVectorTy(ValTy, VF); 5530 unsigned Alignment = getLoadStoreAlignment(I); 5531 unsigned AS = getLoadStoreAddressSpace(I); 5532 if (isa<LoadInst>(I)) { 5533 return TTI.getAddressComputationCost(ValTy) + 5534 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5535 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5536 } 5537 StoreInst *SI = cast<StoreInst>(I); 5538 5539 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5540 return TTI.getAddressComputationCost(ValTy) + 5541 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) + 5542 (isLoopInvariantStoreValue ? 
0 : TTI.getVectorInstrCost( 5543 Instruction::ExtractElement, 5544 VectorTy, VF - 1)); 5545 } 5546 5547 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5548 unsigned VF) { 5549 Type *ValTy = getMemInstValueType(I); 5550 Type *VectorTy = ToVectorTy(ValTy, VF); 5551 unsigned Alignment = getLoadStoreAlignment(I); 5552 Value *Ptr = getLoadStorePointerOperand(I); 5553 5554 return TTI.getAddressComputationCost(VectorTy) + 5555 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5556 Legal->isMaskRequired(I), Alignment); 5557 } 5558 5559 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5560 unsigned VF) { 5561 Type *ValTy = getMemInstValueType(I); 5562 Type *VectorTy = ToVectorTy(ValTy, VF); 5563 unsigned AS = getLoadStoreAddressSpace(I); 5564 5565 auto Group = getInterleavedAccessGroup(I); 5566 assert(Group && "Fail to get an interleaved access group."); 5567 5568 unsigned InterleaveFactor = Group->getFactor(); 5569 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5570 5571 // Holds the indices of existing members in an interleaved load group. 5572 // An interleaved store group doesn't need this as it doesn't allow gaps. 5573 SmallVector<unsigned, 4> Indices; 5574 if (isa<LoadInst>(I)) { 5575 for (unsigned i = 0; i < InterleaveFactor; i++) 5576 if (Group->getMember(i)) 5577 Indices.push_back(i); 5578 } 5579 5580 // Calculate the cost of the whole interleaved group. 5581 bool UseMaskForGaps = 5582 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 5583 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5584 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5585 Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps); 5586 5587 if (Group->isReverse()) { 5588 // TODO: Add support for reversed masked interleaved access. 5589 assert(!Legal->isMaskRequired(I) && 5590 "Reverse masked interleaved access not supported."); 5591 Cost += Group->getNumMembers() * 5592 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5593 } 5594 return Cost; 5595 } 5596 5597 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5598 unsigned VF) { 5599 // Calculate scalar cost only. Vectorization cost should be ready at this 5600 // moment. 5601 if (VF == 1) { 5602 Type *ValTy = getMemInstValueType(I); 5603 unsigned Alignment = getLoadStoreAlignment(I); 5604 unsigned AS = getLoadStoreAddressSpace(I); 5605 5606 return TTI.getAddressComputationCost(ValTy) + 5607 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5608 } 5609 return getWideningCost(I, VF); 5610 } 5611 5612 LoopVectorizationCostModel::VectorizationCostTy 5613 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5614 // If we know that this instruction will remain uniform, check the cost of 5615 // the scalar version. 5616 if (isUniformAfterVectorization(I, VF)) 5617 VF = 1; 5618 5619 if (VF > 1 && isProfitableToScalarize(I, VF)) 5620 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5621 5622 // Forced scalars do not have any scalarization overhead. 
5623 auto ForcedScalar = ForcedScalars.find(VF); 5624 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5625 auto InstSet = ForcedScalar->second; 5626 if (InstSet.find(I) != InstSet.end()) 5627 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5628 } 5629 5630 Type *VectorTy; 5631 unsigned C = getInstructionCost(I, VF, VectorTy); 5632 5633 bool TypeNotScalarized = 5634 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5635 return VectorizationCostTy(C, TypeNotScalarized); 5636 } 5637 5638 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 5639 if (VF == 1) 5640 return; 5641 NumPredStores = 0; 5642 for (BasicBlock *BB : TheLoop->blocks()) { 5643 // For each instruction in the old loop. 5644 for (Instruction &I : *BB) { 5645 Value *Ptr = getLoadStorePointerOperand(&I); 5646 if (!Ptr) 5647 continue; 5648 5649 // TODO: We should generate better code and update the cost model for 5650 // predicated uniform stores. Today they are treated as any other 5651 // predicated store (see added test cases in 5652 // invariant-store-vectorization.ll). 5653 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 5654 NumPredStores++; 5655 5656 if (Legal->isUniform(Ptr) && 5657 // Conditional loads and stores should be scalarized and predicated. 5658 // isScalarWithPredication cannot be used here since masked 5659 // gather/scatters are not considered scalar with predication. 5660 !Legal->blockNeedsPredication(I.getParent())) { 5661 // TODO: Avoid replicating loads and stores instead of 5662 // relying on instcombine to remove them. 5663 // Load: Scalar load + broadcast 5664 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 5665 unsigned Cost = getUniformMemOpCost(&I, VF); 5666 setWideningDecision(&I, VF, CM_Scalarize, Cost); 5667 continue; 5668 } 5669 5670 // We assume that widening is the best solution when possible. 5671 if (memoryInstructionCanBeWidened(&I, VF)) { 5672 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 5673 int ConsecutiveStride = 5674 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 5675 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5676 "Expected consecutive stride."); 5677 InstWidening Decision = 5678 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 5679 setWideningDecision(&I, VF, Decision, Cost); 5680 continue; 5681 } 5682 5683 // Choose between Interleaving, Gather/Scatter or Scalarization. 5684 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 5685 unsigned NumAccesses = 1; 5686 if (isAccessInterleaved(&I)) { 5687 auto Group = getInterleavedAccessGroup(&I); 5688 assert(Group && "Fail to get an interleaved access group."); 5689 5690 // Make one decision for the whole group. 5691 if (getWideningDecision(&I, VF) != CM_Unknown) 5692 continue; 5693 5694 NumAccesses = Group->getNumMembers(); 5695 if (interleavedAccessCanBeWidened(&I, VF)) 5696 InterleaveCost = getInterleaveGroupCost(&I, VF); 5697 } 5698 5699 unsigned GatherScatterCost = 5700 isLegalGatherOrScatter(&I) 5701 ? getGatherScatterCost(&I, VF) * NumAccesses 5702 : std::numeric_limits<unsigned>::max(); 5703 5704 unsigned ScalarizationCost = 5705 getMemInstScalarizationCost(&I, VF) * NumAccesses; 5706 5707 // Choose better solution for the current VF, 5708 // write down this decision and use it during vectorization. 
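      // Illustrative example (hypothetical costs, not from the original
      // source): if for the current VF InterleaveCost = 6, GatherScatterCost
      // = 8 and ScalarizationCost = 10, the group is interleaved. Per the
      // comparisons below, a tie between interleaving and gather/scatter
      // favors interleaving, a tie between gather/scatter and scalarization
      // favors scalarization, and interleaving is only chosen when strictly
      // cheaper than scalarization.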
5709       unsigned Cost;
5710       InstWidening Decision;
5711       if (InterleaveCost <= GatherScatterCost &&
5712           InterleaveCost < ScalarizationCost) {
5713         Decision = CM_Interleave;
5714         Cost = InterleaveCost;
5715       } else if (GatherScatterCost < ScalarizationCost) {
5716         Decision = CM_GatherScatter;
5717         Cost = GatherScatterCost;
5718       } else {
5719         Decision = CM_Scalarize;
5720         Cost = ScalarizationCost;
5721       }
5722       // If the instruction belongs to an interleave group, the whole group
5723       // receives the same decision. The whole group receives the cost, but
5724       // the cost will actually be assigned to one instruction.
5725       if (auto Group = getInterleavedAccessGroup(&I))
5726         setWideningDecision(Group, VF, Decision, Cost);
5727       else
5728         setWideningDecision(&I, VF, Decision, Cost);
5729     }
5730   }
5731
5732   // Make sure that any load of address and any other address computation
5733   // remains scalar unless there is gather/scatter support. This avoids
5734   // inevitable extracts into address registers, and also has the benefit of
5735   // activating LSR more, since that pass can't optimize vectorized
5736   // addresses.
5737   if (TTI.prefersVectorizedAddressing())
5738     return;
5739
5740   // Start with all scalar pointer uses.
5741   SmallPtrSet<Instruction *, 8> AddrDefs;
5742   for (BasicBlock *BB : TheLoop->blocks())
5743     for (Instruction &I : *BB) {
5744       Instruction *PtrDef =
5745           dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
5746       if (PtrDef && TheLoop->contains(PtrDef) &&
5747           getWideningDecision(&I, VF) != CM_GatherScatter)
5748         AddrDefs.insert(PtrDef);
5749     }
5750
5751   // Add all instructions used to generate the addresses.
5752   SmallVector<Instruction *, 4> Worklist;
5753   for (auto *I : AddrDefs)
5754     Worklist.push_back(I);
5755   while (!Worklist.empty()) {
5756     Instruction *I = Worklist.pop_back_val();
5757     for (auto &Op : I->operands())
5758       if (auto *InstOp = dyn_cast<Instruction>(Op))
5759         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
5760             AddrDefs.insert(InstOp).second)
5761           Worklist.push_back(InstOp);
5762   }
5763
5764   for (auto *I : AddrDefs) {
5765     if (isa<LoadInst>(I)) {
5766       // Setting the desired widening decision should ideally be handled by
5767       // cost functions, but since this involves the task of finding out
5768       // if the loaded register is involved in an address computation, it is
5769       // instead changed here when we know this is the case.
5770       InstWidening Decision = getWideningDecision(I, VF);
5771       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
5772         // Scalarize a widened load of address.
5773         setWideningDecision(I, VF, CM_Scalarize,
5774                             (VF * getMemoryInstructionCost(I, 1)));
5775       else if (auto Group = getInterleavedAccessGroup(I)) {
5776         // Scalarize an interleave group of address loads.
5777         for (unsigned I = 0; I < Group->getFactor(); ++I) {
5778           if (Instruction *Member = Group->getMember(I))
5779             setWideningDecision(Member, VF, CM_Scalarize,
5780                                 (VF * getMemoryInstructionCost(Member, 1)));
5781         }
5782       }
5783     } else
5784       // Make sure I gets scalarized and a cost estimate without
5785       // scalarization overhead.
5786       ForcedScalars[VF].insert(I);
5787   }
5788 }
5789
5790 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
5791                                                         unsigned VF,
5792                                                         Type *&VectorTy) {
5793   Type *RetTy = I->getType();
5794   if (canTruncateToMinimalBitwidth(I, VF))
5795     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
5796   VectorTy = isScalarAfterVectorization(I, VF) ?
RetTy : ToVectorTy(RetTy, VF); 5797 auto SE = PSE.getSE(); 5798 5799 // TODO: We need to estimate the cost of intrinsic calls. 5800 switch (I->getOpcode()) { 5801 case Instruction::GetElementPtr: 5802 // We mark this instruction as zero-cost because the cost of GEPs in 5803 // vectorized code depends on whether the corresponding memory instruction 5804 // is scalarized or not. Therefore, we handle GEPs with the memory 5805 // instruction cost. 5806 return 0; 5807 case Instruction::Br: { 5808 // In cases of scalarized and predicated instructions, there will be VF 5809 // predicated blocks in the vectorized loop. Each branch around these 5810 // blocks requires also an extract of its vector compare i1 element. 5811 bool ScalarPredicatedBB = false; 5812 BranchInst *BI = cast<BranchInst>(I); 5813 if (VF > 1 && BI->isConditional() && 5814 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 5815 PredicatedBBsAfterVectorization.end() || 5816 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 5817 PredicatedBBsAfterVectorization.end())) 5818 ScalarPredicatedBB = true; 5819 5820 if (ScalarPredicatedBB) { 5821 // Return cost for branches around scalarized and predicated blocks. 5822 Type *Vec_i1Ty = 5823 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 5824 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 5825 (TTI.getCFInstrCost(Instruction::Br) * VF)); 5826 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 5827 // The back-edge branch will remain, as will all scalar branches. 5828 return TTI.getCFInstrCost(Instruction::Br); 5829 else 5830 // This branch will be eliminated by if-conversion. 5831 return 0; 5832 // Note: We currently assume zero cost for an unconditional branch inside 5833 // a predicated block since it will become a fall-through, although we 5834 // may decide in the future to call TTI for all branches. 5835 } 5836 case Instruction::PHI: { 5837 auto *Phi = cast<PHINode>(I); 5838 5839 // First-order recurrences are replaced by vector shuffles inside the loop. 5840 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 5841 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5842 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5843 VectorTy, VF - 1, VectorType::get(RetTy, 1)); 5844 5845 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 5846 // converted into select instructions. We require N - 1 selects per phi 5847 // node, where N is the number of incoming values. 5848 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 5849 return (Phi->getNumIncomingValues() - 1) * 5850 TTI.getCmpSelInstrCost( 5851 Instruction::Select, ToVectorTy(Phi->getType(), VF), 5852 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 5853 5854 return TTI.getCFInstrCost(Instruction::PHI); 5855 } 5856 case Instruction::UDiv: 5857 case Instruction::SDiv: 5858 case Instruction::URem: 5859 case Instruction::SRem: 5860 // If we have a predicated instruction, it may not be executed for each 5861 // vector lane. Get the scalarization cost and scale this amount by the 5862 // probability of executing the predicated block. If the instruction is not 5863 // predicated, we fall through to the next case. 5864 if (VF > 1 && isScalarWithPredication(I)) { 5865 unsigned Cost = 0; 5866 5867 // These instructions have a non-void type, so account for the phi nodes 5868 // that we will create. This cost is likely to be zero. 
The phi node
5869       // cost, if any, should be scaled by the block probability because it
5870       // models a copy at the end of each predicated block.
5871       Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
5872
5873       // The cost of the non-predicated instruction.
5874       Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
5875
5876       // The cost of insertelement and extractelement instructions needed for
5877       // scalarization.
5878       Cost += getScalarizationOverhead(I, VF, TTI);
5879
5880       // Scale the cost by the probability of executing the predicated blocks.
5881       // This assumes the predicated block for each vector lane is equally
5882       // likely.
5883       return Cost / getReciprocalPredBlockProb();
5884     }
5885     LLVM_FALLTHROUGH;
5886   case Instruction::Add:
5887   case Instruction::FAdd:
5888   case Instruction::Sub:
5889   case Instruction::FSub:
5890   case Instruction::Mul:
5891   case Instruction::FMul:
5892   case Instruction::FDiv:
5893   case Instruction::FRem:
5894   case Instruction::Shl:
5895   case Instruction::LShr:
5896   case Instruction::AShr:
5897   case Instruction::And:
5898   case Instruction::Or:
5899   case Instruction::Xor: {
5900     // Since we will replace the stride by 1, the multiplication should go away.
5901     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
5902       return 0;
5903     // Certain instructions can be cheaper to vectorize if they have a constant
5904     // second vector operand. One example of this is shifts on x86.
5905     Value *Op2 = I->getOperand(1);
5906     TargetTransformInfo::OperandValueProperties Op2VP;
5907     TargetTransformInfo::OperandValueKind Op2VK =
5908         TTI.getOperandInfo(Op2, Op2VP);
5909     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
5910       Op2VK = TargetTransformInfo::OK_UniformValue;
5911
5912     SmallVector<const Value *, 4> Operands(I->operand_values());
5913     unsigned N = isScalarAfterVectorization(I, VF) ?
VF : 1; 5914 return N * TTI.getArithmeticInstrCost( 5915 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 5916 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands); 5917 } 5918 case Instruction::Select: { 5919 SelectInst *SI = cast<SelectInst>(I); 5920 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 5921 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 5922 Type *CondTy = SI->getCondition()->getType(); 5923 if (!ScalarCond) 5924 CondTy = VectorType::get(CondTy, VF); 5925 5926 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 5927 } 5928 case Instruction::ICmp: 5929 case Instruction::FCmp: { 5930 Type *ValTy = I->getOperand(0)->getType(); 5931 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 5932 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 5933 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 5934 VectorTy = ToVectorTy(ValTy, VF); 5935 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 5936 } 5937 case Instruction::Store: 5938 case Instruction::Load: { 5939 unsigned Width = VF; 5940 if (Width > 1) { 5941 InstWidening Decision = getWideningDecision(I, Width); 5942 assert(Decision != CM_Unknown && 5943 "CM decision should be taken at this point"); 5944 if (Decision == CM_Scalarize) 5945 Width = 1; 5946 } 5947 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 5948 return getMemoryInstructionCost(I, VF); 5949 } 5950 case Instruction::ZExt: 5951 case Instruction::SExt: 5952 case Instruction::FPToUI: 5953 case Instruction::FPToSI: 5954 case Instruction::FPExt: 5955 case Instruction::PtrToInt: 5956 case Instruction::IntToPtr: 5957 case Instruction::SIToFP: 5958 case Instruction::UIToFP: 5959 case Instruction::Trunc: 5960 case Instruction::FPTrunc: 5961 case Instruction::BitCast: { 5962 // We optimize the truncation of induction variables having constant 5963 // integer steps. The cost of these truncations is the same as the scalar 5964 // operation. 5965 if (isOptimizableIVTruncate(I, VF)) { 5966 auto *Trunc = cast<TruncInst>(I); 5967 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 5968 Trunc->getSrcTy(), Trunc); 5969 } 5970 5971 Type *SrcScalarTy = I->getOperand(0)->getType(); 5972 Type *SrcVecTy = 5973 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 5974 if (canTruncateToMinimalBitwidth(I, VF)) { 5975 // This cast is going to be shrunk. This may remove the cast or it might 5976 // turn it into slightly different cast. For example, if MinBW == 16, 5977 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 5978 // 5979 // Calculate the modified src and dest types. 5980 Type *MinVecTy = VectorTy; 5981 if (I->getOpcode() == Instruction::Trunc) { 5982 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 5983 VectorTy = 5984 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 5985 } else if (I->getOpcode() == Instruction::ZExt || 5986 I->getOpcode() == Instruction::SExt) { 5987 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 5988 VectorTy = 5989 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 5990 } 5991 } 5992 5993 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 5994 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 5995 } 5996 case Instruction::Call: { 5997 bool NeedToScalarize; 5998 CallInst *CI = cast<CallInst>(I); 5999 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6000 if (getVectorIntrinsicIDForCall(CI, TLI)) 6001 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6002 return CallCost; 6003 } 6004 default: 6005 // The cost of executing VF copies of the scalar instruction. This opcode 6006 // is unknown. Assume that it is the same as 'mul'. 6007 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6008 getScalarizationOverhead(I, VF, TTI); 6009 } // end of switch. 6010 } 6011 6012 char LoopVectorize::ID = 0; 6013 6014 static const char lv_name[] = "Loop Vectorization"; 6015 6016 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6017 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6018 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6019 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6020 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6021 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6022 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6023 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6024 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6025 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6026 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6027 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6028 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6029 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6030 6031 namespace llvm { 6032 6033 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6034 bool VectorizeOnlyWhenForced) { 6035 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6036 } 6037 6038 } // end namespace llvm 6039 6040 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6041 // Check if the pointer operand of a load or store instruction is 6042 // consecutive. 6043 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6044 return Legal->isConsecutivePtr(Ptr); 6045 return false; 6046 } 6047 6048 void LoopVectorizationCostModel::collectValuesToIgnore() { 6049 // Ignore ephemeral values. 6050 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6051 6052 // Ignore type-promoting instructions we identified during reduction 6053 // detection. 6054 for (auto &Reduction : *Legal->getReductionVars()) { 6055 RecurrenceDescriptor &RedDes = Reduction.second; 6056 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6057 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6058 } 6059 // Ignore type-casting instructions we identified during induction 6060 // detection. 6061 for (auto &Induction : *Legal->getInductionVars()) { 6062 InductionDescriptor &IndDes = Induction.second; 6063 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6064 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6065 } 6066 } 6067 6068 VectorizationFactor 6069 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize, 6070 unsigned UserVF) { 6071 // Width 1 means no vectorization, cost 0 means uncomputed cost. 6072 const VectorizationFactor NoVectorization = {1U, 0U}; 6073 6074 // Outer loop handling: They may require CFG and instruction level 6075 // transformations before even evaluating whether vectorization is profitable. 
6076 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6077 // the vectorization pipeline. 6078 if (!OrigLoop->empty()) { 6079 // TODO: If UserVF is not provided, we set UserVF to 4 for stress testing. 6080 // This won't be necessary when UserVF is not required in the VPlan-native 6081 // path. 6082 if (VPlanBuildStressTest && !UserVF) 6083 UserVF = 4; 6084 6085 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6086 assert(UserVF && "Expected UserVF for outer loop vectorization."); 6087 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6088 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6089 buildVPlans(UserVF, UserVF); 6090 6091 // For VPlan build stress testing, we bail out after VPlan construction. 6092 if (VPlanBuildStressTest) 6093 return NoVectorization; 6094 6095 return {UserVF, 0}; 6096 } 6097 6098 LLVM_DEBUG( 6099 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6100 "VPlan-native path.\n"); 6101 return NoVectorization; 6102 } 6103 6104 VectorizationFactor 6105 LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) { 6106 assert(OrigLoop->empty() && "Inner loop expected."); 6107 // Width 1 means no vectorization, cost 0 means uncomputed cost. 6108 const VectorizationFactor NoVectorization = {1U, 0U}; 6109 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize); 6110 if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize. 6111 return NoVectorization; 6112 6113 // Invalidate interleave groups if all blocks of loop will be predicated. 6114 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6115 !useMaskedInterleavedAccesses(*TTI)) { 6116 LLVM_DEBUG( 6117 dbgs() 6118 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6119 "which requires masked-interleaved support.\n"); 6120 CM.InterleaveInfo.reset(); 6121 } 6122 6123 if (UserVF) { 6124 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6125 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6126 // Collect the instructions (and their associated costs) that will be more 6127 // profitable to scalarize. 6128 CM.selectUserVectorizationFactor(UserVF); 6129 buildVPlansWithVPRecipes(UserVF, UserVF); 6130 LLVM_DEBUG(printPlans(dbgs())); 6131 return {UserVF, 0}; 6132 } 6133 6134 unsigned MaxVF = MaybeMaxVF.getValue(); 6135 assert(MaxVF != 0 && "MaxVF is zero."); 6136 6137 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6138 // Collect Uniform and Scalar instructions after vectorization with VF. 6139 CM.collectUniformsAndScalars(VF); 6140 6141 // Collect the instructions (and their associated costs) that will be more 6142 // profitable to scalarize. 6143 if (VF > 1) 6144 CM.collectInstsToScalarize(VF); 6145 } 6146 6147 buildVPlansWithVPRecipes(1, MaxVF); 6148 LLVM_DEBUG(printPlans(dbgs())); 6149 if (MaxVF == 1) 6150 return NoVectorization; 6151 6152 // Select the optimal vectorization factor. 
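  // Illustrative note (editorial, numbers are hypothetical): with MaxVF = 8,
  // the candidate factors considered are the powers of two 1, 2, 4 and 8; the
  // cost model compares their expected cost per scalar iteration and returns
  // the cheapest width together with its cost.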
6153 return CM.selectVectorizationFactor(MaxVF); 6154 } 6155 6156 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6157 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6158 << '\n'); 6159 BestVF = VF; 6160 BestUF = UF; 6161 6162 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6163 return !Plan->hasVF(VF); 6164 }); 6165 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6166 } 6167 6168 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6169 DominatorTree *DT) { 6170 // Perform the actual loop transformation. 6171 6172 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6173 VPCallbackILV CallbackILV(ILV); 6174 6175 VPTransformState State{BestVF, BestUF, LI, 6176 DT, ILV.Builder, ILV.VectorLoopValueMap, 6177 &ILV, CallbackILV}; 6178 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6179 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6180 6181 //===------------------------------------------------===// 6182 // 6183 // Notice: any optimization or new instruction that go 6184 // into the code below should also be implemented in 6185 // the cost-model. 6186 // 6187 //===------------------------------------------------===// 6188 6189 // 2. Copy and widen instructions from the old loop into the new loop. 6190 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6191 VPlans.front()->execute(&State); 6192 6193 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6194 // predication, updating analyses. 6195 ILV.fixVectorizedLoop(); 6196 } 6197 6198 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6199 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6200 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6201 6202 // We create new control-flow for the vectorized loop, so the original 6203 // condition will be dead after vectorization if it's only used by the 6204 // branch. 6205 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6206 if (Cmp && Cmp->hasOneUse()) 6207 DeadInstructions.insert(Cmp); 6208 6209 // We create new "steps" for induction variable updates to which the original 6210 // induction variables map. An original update instruction will be dead if 6211 // all its users except the induction variable are dead. 6212 for (auto &Induction : *Legal->getInductionVars()) { 6213 PHINode *Ind = Induction.first; 6214 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6215 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6216 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6217 DeadInstructions.end(); 6218 })) 6219 DeadInstructions.insert(IndUpdate); 6220 6221 // We record as "Dead" also the type-casting instructions we had identified 6222 // during induction analysis. We don't need any handling for them in the 6223 // vectorized loop because we have proven that, under a proper runtime 6224 // test guarding the vectorized loop, the value of the phi, and the casted 6225 // value of the phi, are the same. The last instruction in this casting chain 6226 // will get its scalar/vector/widened def from the scalar/vector/widened def 6227 // of the respective phi node. Any other casts in the induction def-use chain 6228 // have no other uses outside the phi update chain, and will be ignored. 
6229 InductionDescriptor &IndDes = Induction.second; 6230 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6231 DeadInstructions.insert(Casts.begin(), Casts.end()); 6232 } 6233 } 6234 6235 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6236 6237 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6238 6239 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6240 Instruction::BinaryOps BinOp) { 6241 // When unrolling and the VF is 1, we only need to add a simple scalar. 6242 Type *Ty = Val->getType(); 6243 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6244 6245 if (Ty->isFloatingPointTy()) { 6246 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6247 6248 // Floating point operations had to be 'fast' to enable the unrolling. 6249 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6250 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6251 } 6252 Constant *C = ConstantInt::get(Ty, StartIdx); 6253 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6254 } 6255 6256 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6257 SmallVector<Metadata *, 4> MDs; 6258 // Reserve first location for self reference to the LoopID metadata node. 6259 MDs.push_back(nullptr); 6260 bool IsUnrollMetadata = false; 6261 MDNode *LoopID = L->getLoopID(); 6262 if (LoopID) { 6263 // First find existing loop unrolling disable metadata. 6264 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6265 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6266 if (MD) { 6267 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6268 IsUnrollMetadata = 6269 S && S->getString().startswith("llvm.loop.unroll.disable"); 6270 } 6271 MDs.push_back(LoopID->getOperand(i)); 6272 } 6273 } 6274 6275 if (!IsUnrollMetadata) { 6276 // Add runtime unroll disable metadata. 6277 LLVMContext &Context = L->getHeader()->getContext(); 6278 SmallVector<Metadata *, 1> DisableOperands; 6279 DisableOperands.push_back( 6280 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6281 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6282 MDs.push_back(DisableNode); 6283 MDNode *NewLoopID = MDNode::get(Context, MDs); 6284 // Set operand 0 to refer to the loop id itself. 6285 NewLoopID->replaceOperandWith(0, NewLoopID); 6286 L->setLoopID(NewLoopID); 6287 } 6288 } 6289 6290 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6291 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6292 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6293 bool PredicateAtRangeStart = Predicate(Range.Start); 6294 6295 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6296 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6297 Range.End = TmpVF; 6298 break; 6299 } 6300 6301 return PredicateAtRangeStart; 6302 } 6303 6304 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6305 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6306 /// of VF's starting at a given VF and extending it as much as possible. Each 6307 /// vectorization decision can potentially shorten this sub-range during 6308 /// buildVPlan(). 
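/// For example (illustrative, not from the original source): with \p MinVF = 1
/// and \p MaxVF = 8, the loop below might build one VPlan covering {1, 2} and
/// another covering {4, 8} if some widening decision first changes at VF = 4;
/// absent such a change, a single VPlan covering {1, 2, 4, 8} is built.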
6309 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6310 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6311 VFRange SubRange = {VF, MaxVF + 1}; 6312 VPlans.push_back(buildVPlan(SubRange)); 6313 VF = SubRange.End; 6314 } 6315 } 6316 6317 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6318 VPlanPtr &Plan) { 6319 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6320 6321 // Look for cached value. 6322 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6323 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6324 if (ECEntryIt != EdgeMaskCache.end()) 6325 return ECEntryIt->second; 6326 6327 VPValue *SrcMask = createBlockInMask(Src, Plan); 6328 6329 // The terminator has to be a branch inst! 6330 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6331 assert(BI && "Unexpected terminator found"); 6332 6333 if (!BI->isConditional()) 6334 return EdgeMaskCache[Edge] = SrcMask; 6335 6336 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6337 assert(EdgeMask && "No Edge Mask found for condition"); 6338 6339 if (BI->getSuccessor(0) != Dst) 6340 EdgeMask = Builder.createNot(EdgeMask); 6341 6342 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6343 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6344 6345 return EdgeMaskCache[Edge] = EdgeMask; 6346 } 6347 6348 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6349 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6350 6351 // Look for cached value. 6352 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6353 if (BCEntryIt != BlockMaskCache.end()) 6354 return BCEntryIt->second; 6355 6356 // All-one mask is modelled as no-mask following the convention for masked 6357 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6358 VPValue *BlockMask = nullptr; 6359 6360 if (OrigLoop->getHeader() == BB) { 6361 if (!CM.blockNeedsPredication(BB)) 6362 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6363 6364 // Introduce the early-exit compare IV <= BTC to form header block mask. 6365 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6366 VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6367 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6368 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6369 return BlockMaskCache[BB] = BlockMask; 6370 } 6371 6372 // This is the block mask. We OR all incoming edges. 6373 for (auto *Predecessor : predecessors(BB)) { 6374 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6375 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6376 return BlockMaskCache[BB] = EdgeMask; 6377 6378 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6379 BlockMask = EdgeMask; 6380 continue; 6381 } 6382 6383 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6384 } 6385 6386 return BlockMaskCache[BB] = BlockMask; 6387 } 6388 6389 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I, 6390 VFRange &Range, 6391 VPlanPtr &Plan) { 6392 const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I); 6393 if (!IG) 6394 return nullptr; 6395 6396 // Now check if IG is relevant for VF's in the given range. 
6397 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 6398 return [=](unsigned VF) -> bool { 6399 return (VF >= 2 && // Query is illegal for VF == 1 6400 CM.getWideningDecision(I, VF) == 6401 LoopVectorizationCostModel::CM_Interleave); 6402 }; 6403 }; 6404 if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range)) 6405 return nullptr; 6406 6407 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 6408 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 6409 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 6410 assert(I == IG->getInsertPos() && 6411 "Generating a recipe for an adjunct member of an interleave group"); 6412 6413 VPValue *Mask = nullptr; 6414 if (Legal->isMaskRequired(I)) 6415 Mask = createBlockInMask(I->getParent(), Plan); 6416 6417 return new VPInterleaveRecipe(IG, Mask); 6418 } 6419 6420 VPWidenMemoryInstructionRecipe * 6421 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6422 VPlanPtr &Plan) { 6423 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 6424 return nullptr; 6425 6426 auto willWiden = [&](unsigned VF) -> bool { 6427 if (VF == 1) 6428 return false; 6429 if (CM.isScalarAfterVectorization(I, VF) || 6430 CM.isProfitableToScalarize(I, VF)) 6431 return false; 6432 LoopVectorizationCostModel::InstWidening Decision = 6433 CM.getWideningDecision(I, VF); 6434 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6435 "CM decision should be taken at this point."); 6436 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 6437 "Interleave memory opportunity should be caught earlier."); 6438 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6439 }; 6440 6441 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6442 return nullptr; 6443 6444 VPValue *Mask = nullptr; 6445 if (Legal->isMaskRequired(I)) 6446 Mask = createBlockInMask(I->getParent(), Plan); 6447 6448 return new VPWidenMemoryInstructionRecipe(*I, Mask); 6449 } 6450 6451 VPWidenIntOrFpInductionRecipe * 6452 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) { 6453 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 6454 // Check if this is an integer or fp induction. If so, build the recipe that 6455 // produces its scalar and vector values. 6456 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 6457 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6458 II.getKind() == InductionDescriptor::IK_FpInduction) 6459 return new VPWidenIntOrFpInductionRecipe(Phi); 6460 6461 return nullptr; 6462 } 6463 6464 // Optimize the special case where the source is a constant integer 6465 // induction variable. Notice that we can only optimize the 'trunc' case 6466 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6467 // (c) other casts depend on pointer size. 6468 6469 // Determine whether \p K is a truncation based on an induction variable that 6470 // can be optimized. 
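  // For example (illustrative IR, not from the original source): a
  // "trunc i64 %iv to i32" where %iv is an induction with a constant integer
  // step can be optimized, since the narrower induction can be generated
  // directly instead of widening %iv and truncating every lane.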
6471 auto isOptimizableIVTruncate = 6472 [&](Instruction *K) -> std::function<bool(unsigned)> { 6473 return 6474 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6475 }; 6476 6477 if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange( 6478 isOptimizableIVTruncate(I), Range)) 6479 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6480 cast<TruncInst>(I)); 6481 return nullptr; 6482 } 6483 6484 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) { 6485 PHINode *Phi = dyn_cast<PHINode>(I); 6486 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 6487 return nullptr; 6488 6489 // We know that all PHIs in non-header blocks are converted into selects, so 6490 // we don't have to worry about the insertion order and we can just use the 6491 // builder. At this point we generate the predication tree. There may be 6492 // duplications since this is a simple recursive scan, but future 6493 // optimizations will clean it up. 6494 6495 SmallVector<VPValue *, 2> Masks; 6496 unsigned NumIncoming = Phi->getNumIncomingValues(); 6497 for (unsigned In = 0; In < NumIncoming; In++) { 6498 VPValue *EdgeMask = 6499 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6500 assert((EdgeMask || NumIncoming == 1) && 6501 "Multiple predecessors with one having a full mask"); 6502 if (EdgeMask) 6503 Masks.push_back(EdgeMask); 6504 } 6505 return new VPBlendRecipe(Phi, Masks); 6506 } 6507 6508 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 6509 VFRange &Range) { 6510 6511 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6512 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6513 6514 if (IsPredicated) 6515 return false; 6516 6517 auto IsVectorizableOpcode = [](unsigned Opcode) { 6518 switch (Opcode) { 6519 case Instruction::Add: 6520 case Instruction::And: 6521 case Instruction::AShr: 6522 case Instruction::BitCast: 6523 case Instruction::Br: 6524 case Instruction::Call: 6525 case Instruction::FAdd: 6526 case Instruction::FCmp: 6527 case Instruction::FDiv: 6528 case Instruction::FMul: 6529 case Instruction::FPExt: 6530 case Instruction::FPToSI: 6531 case Instruction::FPToUI: 6532 case Instruction::FPTrunc: 6533 case Instruction::FRem: 6534 case Instruction::FSub: 6535 case Instruction::GetElementPtr: 6536 case Instruction::ICmp: 6537 case Instruction::IntToPtr: 6538 case Instruction::Load: 6539 case Instruction::LShr: 6540 case Instruction::Mul: 6541 case Instruction::Or: 6542 case Instruction::PHI: 6543 case Instruction::PtrToInt: 6544 case Instruction::SDiv: 6545 case Instruction::Select: 6546 case Instruction::SExt: 6547 case Instruction::Shl: 6548 case Instruction::SIToFP: 6549 case Instruction::SRem: 6550 case Instruction::Store: 6551 case Instruction::Sub: 6552 case Instruction::Trunc: 6553 case Instruction::UDiv: 6554 case Instruction::UIToFP: 6555 case Instruction::URem: 6556 case Instruction::Xor: 6557 case Instruction::ZExt: 6558 return true; 6559 } 6560 return false; 6561 }; 6562 6563 if (!IsVectorizableOpcode(I->getOpcode())) 6564 return false; 6565 6566 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6567 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6568 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6569 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6570 return false; 6571 } 6572 6573 auto willWiden = [&](unsigned VF) -> bool { 6574 if (!isa<PHINode>(I) && 
(CM.isScalarAfterVectorization(I, VF) || 6575 CM.isProfitableToScalarize(I, VF))) 6576 return false; 6577 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6578 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6579 // The following case may be scalarized depending on the VF. 6580 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6581 // version of the instruction. 6582 // Is it beneficial to perform intrinsic call compared to lib call? 6583 bool NeedToScalarize; 6584 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 6585 bool UseVectorIntrinsic = 6586 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 6587 return UseVectorIntrinsic || !NeedToScalarize; 6588 } 6589 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 6590 assert(CM.getWideningDecision(I, VF) == 6591 LoopVectorizationCostModel::CM_Scalarize && 6592 "Memory widening decisions should have been taken care by now"); 6593 return false; 6594 } 6595 return true; 6596 }; 6597 6598 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6599 return false; 6600 6601 // Success: widen this instruction. We optimize the common case where 6602 // consecutive instructions can be represented by a single recipe. 6603 if (!VPBB->empty()) { 6604 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 6605 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 6606 return true; 6607 } 6608 6609 VPBB->appendRecipe(new VPWidenRecipe(I)); 6610 return true; 6611 } 6612 6613 VPBasicBlock *VPRecipeBuilder::handleReplication( 6614 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 6615 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 6616 VPlanPtr &Plan) { 6617 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 6618 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 6619 Range); 6620 6621 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6622 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6623 6624 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 6625 6626 // Find if I uses a predicated instruction. If so, it will use its scalar 6627 // value. Avoid hoisting the insert-element which packs the scalar value into 6628 // a vector value, as that happens iff all users use the vector value. 6629 for (auto &Op : I->operands()) 6630 if (auto *PredInst = dyn_cast<Instruction>(Op)) 6631 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 6632 PredInst2Recipe[PredInst]->setAlsoPack(false); 6633 6634 // Finalize the recipe for Instr, first if it is not predicated. 6635 if (!IsPredicated) { 6636 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 6637 VPBB->appendRecipe(Recipe); 6638 return VPBB; 6639 } 6640 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 6641 assert(VPBB->getSuccessors().empty() && 6642 "VPBB has successors when handling predicated replication."); 6643 // Record predicated instructions for above packing optimizations. 
6644 PredInst2Recipe[I] = Recipe; 6645 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 6646 VPBlockUtils::insertBlockAfter(Region, VPBB); 6647 auto *RegSucc = new VPBasicBlock(); 6648 VPBlockUtils::insertBlockAfter(RegSucc, Region); 6649 return RegSucc; 6650 } 6651 6652 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 6653 VPRecipeBase *PredRecipe, 6654 VPlanPtr &Plan) { 6655 // Instructions marked for predication are replicated and placed under an 6656 // if-then construct to prevent side-effects. 6657 6658 // Generate recipes to compute the block mask for this region. 6659 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 6660 6661 // Build the triangular if-then region. 6662 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 6663 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 6664 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 6665 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 6666 auto *PHIRecipe = 6667 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 6668 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 6669 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 6670 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 6671 6672 // Note: first set Entry as region entry and then connect successors starting 6673 // from it in order, to propagate the "parent" of each VPBasicBlock. 6674 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 6675 VPBlockUtils::connectBlocks(Pred, Exit); 6676 6677 return Region; 6678 } 6679 6680 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range, 6681 VPlanPtr &Plan, VPBasicBlock *VPBB) { 6682 VPRecipeBase *Recipe = nullptr; 6683 // Check if Instr should belong to an interleave memory recipe, or already 6684 // does. In the latter case Instr is irrelevant. 6685 if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) { 6686 VPBB->appendRecipe(Recipe); 6687 return true; 6688 } 6689 6690 // Check if Instr is a memory operation that should be widened. 6691 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 6692 VPBB->appendRecipe(Recipe); 6693 return true; 6694 } 6695 6696 // Check if Instr should form some PHI recipe. 6697 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 6698 VPBB->appendRecipe(Recipe); 6699 return true; 6700 } 6701 if ((Recipe = tryToBlend(Instr, Plan))) { 6702 VPBB->appendRecipe(Recipe); 6703 return true; 6704 } 6705 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 6706 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 6707 return true; 6708 } 6709 6710 // Check if Instr is to be widened by a general VPWidenRecipe, after 6711 // having first checked for specific widening recipes that deal with 6712 // Interleave Groups, Inductions and Phi nodes. 6713 if (tryToWiden(Instr, VPBB, Range)) 6714 return true; 6715 6716 return false; 6717 } 6718 6719 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF, 6720 unsigned MaxVF) { 6721 assert(OrigLoop->empty() && "Inner loop expected."); 6722 6723 // Collect conditions feeding internal conditional branches; they need to be 6724 // represented in VPlan for it to model masking. 
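  // For example (illustrative source, not from the original): in
  //   for (i) { if (a[i] > 0) b[i] = x; }
  // the compare feeding the internal branch must get a VPValue def so the
  // plan can form the edge and block masks from it.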
6725 SmallPtrSet<Value *, 1> NeedDef; 6726 6727 auto *Latch = OrigLoop->getLoopLatch(); 6728 for (BasicBlock *BB : OrigLoop->blocks()) { 6729 if (BB == Latch) 6730 continue; 6731 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 6732 if (Branch && Branch->isConditional()) 6733 NeedDef.insert(Branch->getCondition()); 6734 } 6735 6736 // If the tail is to be folded by masking, the primary induction variable 6737 // needs to be represented in VPlan for it to model early-exit masking. 6738 if (CM.foldTailByMasking()) 6739 NeedDef.insert(Legal->getPrimaryInduction()); 6740 6741 // Collect instructions from the original loop that will become trivially dead 6742 // in the vectorized loop. We don't need to vectorize these instructions. For 6743 // example, original induction update instructions can become dead because we 6744 // separately emit induction "steps" when generating code for the new loop. 6745 // Similarly, we create a new latch condition when setting up the structure 6746 // of the new loop, so the old one can become dead. 6747 SmallPtrSet<Instruction *, 4> DeadInstructions; 6748 collectTriviallyDeadInstructions(DeadInstructions); 6749 6750 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6751 VFRange SubRange = {VF, MaxVF + 1}; 6752 VPlans.push_back( 6753 buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions)); 6754 VF = SubRange.End; 6755 } 6756 } 6757 6758 LoopVectorizationPlanner::VPlanPtr 6759 LoopVectorizationPlanner::buildVPlanWithVPRecipes( 6760 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef, 6761 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6762 // Hold a mapping from predicated instructions to their recipes, in order to 6763 // fix their AlsoPack behavior if a user is determined to replicate and use a 6764 // scalar instead of vector value. 6765 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 6766 6767 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 6768 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 6769 6770 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 6771 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 6772 auto Plan = llvm::make_unique<VPlan>(VPBB); 6773 6774 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, TTI, Legal, CM, Builder); 6775 // Represent values that will have defs inside VPlan. 6776 for (Value *V : NeedDef) 6777 Plan->addVPValue(V); 6778 6779 // Scan the body of the loop in a topological order to visit each basic block 6780 // after having visited its predecessor basic blocks. 6781 LoopBlocksDFS DFS(OrigLoop); 6782 DFS.perform(LI); 6783 6784 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6785 // Relevant instructions from basic block BB will be grouped into VPRecipe 6786 // ingredients and fill a new VPBasicBlock. 6787 unsigned VPBBsForBB = 0; 6788 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 6789 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 6790 VPBB = FirstVPBBForBB; 6791 Builder.setInsertPoint(VPBB); 6792 6793 std::vector<Instruction *> Ingredients; 6794 6795 // Organize the ingredients to vectorize from current basic block in the 6796 // right order. 6797 for (Instruction &I : BB->instructionsWithoutDebug()) { 6798 Instruction *Instr = &I; 6799 6800 // First filter out irrelevant instructions, to ensure no recipes are 6801 // built for them. 6802 if (isa<BranchInst>(Instr) || 6803 DeadInstructions.find(Instr) != DeadInstructions.end()) 6804 continue; 6805 6806 // I is a member of an InterleaveGroup for Range.Start. 
If it's an adjunct 6807 // member of the IG, do not construct any Recipe for it. 6808 const InterleaveGroup<Instruction> *IG = 6809 CM.getInterleavedAccessGroup(Instr); 6810 if (IG && Instr != IG->getInsertPos() && 6811 Range.Start >= 2 && // Query is illegal for VF == 1 6812 CM.getWideningDecision(Instr, Range.Start) == 6813 LoopVectorizationCostModel::CM_Interleave) { 6814 auto SinkCandidate = SinkAfterInverse.find(Instr); 6815 if (SinkCandidate != SinkAfterInverse.end()) 6816 Ingredients.push_back(SinkCandidate->second); 6817 continue; 6818 } 6819 6820 // Move instructions to handle first-order recurrences, step 1: avoid 6821 // handling this instruction until after we've handled the instruction it 6822 // should follow. 6823 auto SAIt = SinkAfter.find(Instr); 6824 if (SAIt != SinkAfter.end()) { 6825 LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" 6826 << *SAIt->second 6827 << " to vectorize a 1st order recurrence.\n"); 6828 SinkAfterInverse[SAIt->second] = Instr; 6829 continue; 6830 } 6831 6832 Ingredients.push_back(Instr); 6833 6834 // Move instructions to handle first-order recurrences, step 2: push the 6835 // instruction to be sunk at its insertion point. 6836 auto SAInvIt = SinkAfterInverse.find(Instr); 6837 if (SAInvIt != SinkAfterInverse.end()) 6838 Ingredients.push_back(SAInvIt->second); 6839 } 6840 6841 // Introduce each ingredient into VPlan. 6842 for (Instruction *Instr : Ingredients) { 6843 if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB)) 6844 continue; 6845 6846 // Otherwise, if all widening options failed, Instruction is to be 6847 // replicated. This may create a successor for VPBB. 6848 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication( 6849 Instr, Range, VPBB, PredInst2Recipe, Plan); 6850 if (NextVPBB != VPBB) { 6851 VPBB = NextVPBB; 6852 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 6853 : ""); 6854 } 6855 } 6856 } 6857 6858 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 6859 // may also be empty, such as the last one VPBB, reflecting original 6860 // basic-blocks with no recipes. 6861 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 6862 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 6863 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 6864 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 6865 delete PreEntry; 6866 6867 std::string PlanName; 6868 raw_string_ostream RSO(PlanName); 6869 unsigned VF = Range.Start; 6870 Plan->addVF(VF); 6871 RSO << "Initial VPlan for VF={" << VF; 6872 for (VF *= 2; VF < Range.End; VF *= 2) { 6873 Plan->addVF(VF); 6874 RSO << "," << VF; 6875 } 6876 RSO << "},UF>=1"; 6877 RSO.flush(); 6878 Plan->setName(PlanName); 6879 6880 return Plan; 6881 } 6882 6883 LoopVectorizationPlanner::VPlanPtr 6884 LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 6885 // Outer loop handling: They may require CFG and instruction level 6886 // transformations before even evaluating whether vectorization is profitable. 6887 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6888 // the vectorization pipeline. 
6889 assert(!OrigLoop->empty()); 6890 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6891 6892 // Create new empty VPlan 6893 auto Plan = llvm::make_unique<VPlan>(); 6894 6895 // Build hierarchical CFG 6896 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 6897 HCFGBuilder.buildHierarchicalCFG(); 6898 6899 SmallPtrSet<Instruction *, 1> DeadInstructions; 6900 VPlanHCFGTransforms::VPInstructionsToVPRecipes( 6901 Plan, Legal->getInductionVars(), DeadInstructions); 6902 6903 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 6904 Plan->addVF(VF); 6905 6906 return Plan; 6907 } 6908 6909 Value* LoopVectorizationPlanner::VPCallbackILV:: 6910 getOrCreateVectorValues(Value *V, unsigned Part) { 6911 return ILV.getOrCreateVectorValue(V, Part); 6912 } 6913 6914 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 6915 O << " +\n" 6916 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 6917 IG->getInsertPos()->printAsOperand(O, false); 6918 if (User) { 6919 O << ", "; 6920 User->getOperand(0)->printAsOperand(O); 6921 } 6922 O << "\\l\""; 6923 for (unsigned i = 0; i < IG->getFactor(); ++i) 6924 if (Instruction *I = IG->getMember(i)) 6925 O << " +\n" 6926 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 6927 } 6928 6929 void VPWidenRecipe::execute(VPTransformState &State) { 6930 for (auto &Instr : make_range(Begin, End)) 6931 State.ILV->widenInstruction(Instr); 6932 } 6933 6934 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 6935 assert(!State.Instance && "Int or FP induction being replicated."); 6936 State.ILV->widenIntOrFpInduction(IV, Trunc); 6937 } 6938 6939 void VPWidenPHIRecipe::execute(VPTransformState &State) { 6940 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 6941 } 6942 6943 void VPBlendRecipe::execute(VPTransformState &State) { 6944 State.ILV->setDebugLocFromInst(State.Builder, Phi); 6945 // We know that all PHIs in non-header blocks are converted into 6946 // selects, so we don't have to worry about the insertion order and we 6947 // can just use the builder. 6948 // At this point we generate the predication tree. There may be 6949 // duplications since this is a simple recursive scan, but future 6950 // optimizations will clean it up. 6951 6952 unsigned NumIncoming = Phi->getNumIncomingValues(); 6953 6954 assert((User || NumIncoming == 1) && 6955 "Multiple predecessors with predecessors having a full mask"); 6956 // Generate a sequence of selects of the form: 6957 // SELECT(Mask3, In3, 6958 // SELECT(Mask2, In2, 6959 // ( ...))) 6960 InnerLoopVectorizer::VectorParts Entry(State.UF); 6961 for (unsigned In = 0; In < NumIncoming; ++In) { 6962 for (unsigned Part = 0; Part < State.UF; ++Part) { 6963 // We might have single edge PHIs (blocks) - use an identity 6964 // 'select' for the first PHI operand. 6965 Value *In0 = 6966 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 6967 if (In == 0) 6968 Entry[Part] = In0; // Initialize with the first incoming value. 6969 else { 6970 // Select between the current value and the previous incoming edge 6971 // based on the incoming mask. 
6972         Value *Cond = State.get(User->getOperand(In), Part);
6973         Entry[Part] =
6974             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
6975       }
6976     }
6977   }
6978   for (unsigned Part = 0; Part < State.UF; ++Part)
6979     State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
6980 }
6981
6982 void VPInterleaveRecipe::execute(VPTransformState &State) {
6983   assert(!State.Instance && "Interleave group being replicated.");
6984   if (!User)
6985     return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
6986
6987   // Last (and currently only) operand is a mask.
6988   InnerLoopVectorizer::VectorParts MaskValues(State.UF);
6989   VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
6990   for (unsigned Part = 0; Part < State.UF; ++Part)
6991     MaskValues[Part] = State.get(Mask, Part);
6992   State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues);
6993 }
6994
6995 void VPReplicateRecipe::execute(VPTransformState &State) {
6996   if (State.Instance) { // Generate a single instance.
6997     State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
6998     // Insert scalar instance packing it into a vector.
6999     if (AlsoPack && State.VF > 1) {
7000       // If we're constructing lane 0, initialize to start from undef.
7001       if (State.Instance->Lane == 0) {
7002         Value *Undef =
7003             UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7004         State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7005       }
7006       State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7007     }
7008     return;
7009   }
7010
7011   // Generate scalar instances for all VF lanes of all UF parts, unless the
7012   // instruction is uniform, in which case generate only the first lane for each
7013   // of the UF parts.
7014   unsigned EndLane = IsUniform ? 1 : State.VF;
7015   for (unsigned Part = 0; Part < State.UF; ++Part)
7016     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7017       State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7018 }
7019
7020 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7021   assert(State.Instance && "Branch on Mask works only on single instance.");
7022
7023   unsigned Part = State.Instance->Part;
7024   unsigned Lane = State.Instance->Lane;
7025
7026   Value *ConditionBit = nullptr;
7027   if (!User) // Block in mask is all-one.
7028     ConditionBit = State.Builder.getTrue();
7029   else {
7030     VPValue *BlockInMask = User->getOperand(0);
7031     ConditionBit = State.get(BlockInMask, Part);
7032     if (ConditionBit->getType()->isVectorTy())
7033       ConditionBit = State.Builder.CreateExtractElement(
7034           ConditionBit, State.Builder.getInt32(Lane));
7035   }
7036
7037   // Replace the temporary unreachable terminator with a new conditional branch,
7038   // whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst = cast<Instruction>(
      State.ValueMap.getScalarValue(PredInst, *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
  unsigned Part = State.Instance->Part;
  if (State.ValueMap.hasVectorValue(PredInst, Part)) {
    Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
  } else {
    Type *PredInstType = PredInst->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  if (!User)
    return State.ILV->vectorizeMemoryInstruction(&Instr);

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying
// the input LLVM IR.
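// In outline the path mirrors the plan construction above: build the
// hierarchical CFG (VPlanHCFGBuilder), convert plain VPInstructions into
// widening recipes (VPInstructionsToVPRecipes), pick a vectorization factor,
// and only then generate IR by executing the chosen VPlan.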
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, LoopVectorizeHints &Hints) {

  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
  LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code.
  if (VPlanBuildStressTest)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, UserVF, 1, LVL,
                         &CM);
  LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                    << L->getHeader()->getParent()->getName() << "\"\n");
  LVP.executePlan(LB, DT);

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->empty()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are less
  // verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.
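  // For example (illustrative command line; the remark text is produced by
  // the ORE->emit calls below):
  //   clang -O2 -Rpass=loop-vectorize -Rpass-missed=loop-vectorize foo.c
  // remark: vectorized loop (vectorization width: 4, interleaved count: 2)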

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Entrance to the VPlan-native vectorization path. Outer loops are
  // processed here. They may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!L->empty())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, Hints);

  assert(L->empty() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  // Prefer a constant trip count, then profile data, then the upper-bound
  // estimate.
  unsigned ExpectedTC = 0;
  bool HasExpectedTC = false;
  if (const SCEVConstant *ConstExits =
          dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
    const APInt &ExitsCount = ConstExits->getAPInt();
    // We are interested in small values for ExpectedTC. Skip over those that
    // can't fit an unsigned.
    if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
      ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
      HasExpectedTC = true;
    }
  }
  // ExpectedTC may be large because it's bound by a variable. Check
  // profiling information to validate we should vectorize.
  if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
    auto EstimatedTC = getLoopEstimatedTripCount(L);
    if (EstimatedTC) {
      ExpectedTC = *EstimatedTC;
      HasExpectedTC = true;
    }
  }
  if (!HasExpectedTC) {
    ExpectedTC = SE->getSmallConstantMaxTripCount(L);
    HasExpectedTC = (ExpectedTC > 0);
  }

  if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      // Loops with a very small trip count are considered for vectorization
      // under OptForSize, thereby making sure the cost of their loop body is
      // dominant, free of runtime guards and scalar iteration overheads.
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
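  // (Clang typically adds NoImplicitFloat under -mno-implicit-float, e.g. for
  // kernel code, where the compiler must not introduce FP or vector registers
  // on its own.)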
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                         "attribute is used.\n");
    ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    LLVM_DEBUG(
        dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  VectorizationFactor VF = LVP.plan(OptForSize, UserVF);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
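    // This covers both IC == 1 with no user request (UserIC == 0) and a
    // user-specified interleave count of 1; the latter case refines the
    // remark below.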
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs()
        << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
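    // (Concretely, AddRuntimeUnrollDisableMetaData below attaches the
    // "llvm.loop.unroll.runtime.disable" loop metadata to the scalar
    // remainder loop.)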
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }
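  // Note that Changed may be true even when no loop was vectorized, since
  // loop simplification and LCSSA formation above also modify the IR.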
  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
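// A minimal usage sketch (assuming a standard opt build; file names are
// hypothetical): the pass runs in the default optimization pipelines, or can
// be invoked directly, e.g.:
//   opt -passes=loop-vectorize -S in.ll -o out.ll
//   opt -passes=loop-vectorize -enable-vplan-native-path -S in.ll
// The second invocation exercises the VPlan-native path guarded by
// EnableVPlanNativePath above.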