1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops 10 // and generates target-independent LLVM-IR. 11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs 12 // of instructions in order to estimate the profitability of vectorization. 13 // 14 // The loop vectorizer combines consecutive loop iterations into a single 15 // 'wide' iteration. After this transformation the index is incremented 16 // by the SIMD vector width, and not by one. 17 // 18 // This pass has three parts: 19 // 1. The main loop pass that drives the different parts. 20 // 2. LoopVectorizationLegality - A unit that checks for the legality 21 // of the vectorization. 22 // 3. InnerLoopVectorizer - A unit that performs the actual 23 // widening of instructions. 24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability 25 // of vectorization. It decides on the optimal vector width, which 26 // can be one, if vectorization is not profitable. 27 // 28 // There is a development effort going on to migrate loop vectorizer to the 29 // VPlan infrastructure and to introduce outer loop vectorization support (see 30 // docs/Proposal/VectorizationPlan.rst and 31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this 32 // purpose, we temporarily introduced the VPlan-native vectorization path: an 33 // alternative vectorization path that is natively implemented on top of the 34 // VPlan infrastructure. See EnableVPlanNativePath for enabling. 35 // 36 //===----------------------------------------------------------------------===// 37 // 38 // The reduction-variable vectorization is based on the paper: 39 // D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. 40 // 41 // Variable uniformity checks are inspired by: 42 // Karrenberg, R. and Hack, S. Whole Function Vectorization. 43 // 44 // The interleaved access vectorization is based on the paper: 45 // Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved 46 // Data for SIMD 47 // 48 // Other ideas/concepts are from: 49 // A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. 50 // 51 // S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of 52 // Vectorizing Compilers. 
53 // 54 //===----------------------------------------------------------------------===// 55 56 #include "llvm/Transforms/Vectorize/LoopVectorize.h" 57 #include "LoopVectorizationPlanner.h" 58 #include "VPRecipeBuilder.h" 59 #include "VPlanHCFGBuilder.h" 60 #include "VPlanHCFGTransforms.h" 61 #include "VPlanPredicator.h" 62 #include "llvm/ADT/APInt.h" 63 #include "llvm/ADT/ArrayRef.h" 64 #include "llvm/ADT/DenseMap.h" 65 #include "llvm/ADT/DenseMapInfo.h" 66 #include "llvm/ADT/Hashing.h" 67 #include "llvm/ADT/MapVector.h" 68 #include "llvm/ADT/None.h" 69 #include "llvm/ADT/Optional.h" 70 #include "llvm/ADT/STLExtras.h" 71 #include "llvm/ADT/SetVector.h" 72 #include "llvm/ADT/SmallPtrSet.h" 73 #include "llvm/ADT/SmallVector.h" 74 #include "llvm/ADT/Statistic.h" 75 #include "llvm/ADT/StringRef.h" 76 #include "llvm/ADT/Twine.h" 77 #include "llvm/ADT/iterator_range.h" 78 #include "llvm/Analysis/AssumptionCache.h" 79 #include "llvm/Analysis/BasicAliasAnalysis.h" 80 #include "llvm/Analysis/BlockFrequencyInfo.h" 81 #include "llvm/Analysis/CFG.h" 82 #include "llvm/Analysis/CodeMetrics.h" 83 #include "llvm/Analysis/DemandedBits.h" 84 #include "llvm/Analysis/GlobalsModRef.h" 85 #include "llvm/Analysis/LoopAccessAnalysis.h" 86 #include "llvm/Analysis/LoopAnalysisManager.h" 87 #include "llvm/Analysis/LoopInfo.h" 88 #include "llvm/Analysis/LoopIterator.h" 89 #include "llvm/Analysis/MemorySSA.h" 90 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 91 #include "llvm/Analysis/ProfileSummaryInfo.h" 92 #include "llvm/Analysis/ScalarEvolution.h" 93 #include "llvm/Analysis/ScalarEvolutionExpander.h" 94 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 95 #include "llvm/Analysis/TargetLibraryInfo.h" 96 #include "llvm/Analysis/TargetTransformInfo.h" 97 #include "llvm/Analysis/VectorUtils.h" 98 #include "llvm/IR/Attributes.h" 99 #include "llvm/IR/BasicBlock.h" 100 #include "llvm/IR/CFG.h" 101 #include "llvm/IR/Constant.h" 102 #include "llvm/IR/Constants.h" 103 #include "llvm/IR/DataLayout.h" 104 #include "llvm/IR/DebugInfoMetadata.h" 105 #include "llvm/IR/DebugLoc.h" 106 #include "llvm/IR/DerivedTypes.h" 107 #include "llvm/IR/DiagnosticInfo.h" 108 #include "llvm/IR/Dominators.h" 109 #include "llvm/IR/Function.h" 110 #include "llvm/IR/IRBuilder.h" 111 #include "llvm/IR/InstrTypes.h" 112 #include "llvm/IR/Instruction.h" 113 #include "llvm/IR/Instructions.h" 114 #include "llvm/IR/IntrinsicInst.h" 115 #include "llvm/IR/Intrinsics.h" 116 #include "llvm/IR/LLVMContext.h" 117 #include "llvm/IR/Metadata.h" 118 #include "llvm/IR/Module.h" 119 #include "llvm/IR/Operator.h" 120 #include "llvm/IR/Type.h" 121 #include "llvm/IR/Use.h" 122 #include "llvm/IR/User.h" 123 #include "llvm/IR/Value.h" 124 #include "llvm/IR/ValueHandle.h" 125 #include "llvm/IR/Verifier.h" 126 #include "llvm/Pass.h" 127 #include "llvm/Support/Casting.h" 128 #include "llvm/Support/CommandLine.h" 129 #include "llvm/Support/Compiler.h" 130 #include "llvm/Support/Debug.h" 131 #include "llvm/Support/ErrorHandling.h" 132 #include "llvm/Support/MathExtras.h" 133 #include "llvm/Support/raw_ostream.h" 134 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 135 #include "llvm/Transforms/Utils/LoopSimplify.h" 136 #include "llvm/Transforms/Utils/LoopUtils.h" 137 #include "llvm/Transforms/Utils/LoopVersioning.h" 138 #include "llvm/Transforms/Utils/SizeOpts.h" 139 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 140 #include <algorithm> 141 #include <cassert> 142 #include <cstdint> 143 #include <cstdlib> 144 #include <functional> 145 #include 
<iterator> 146 #include <limits> 147 #include <memory> 148 #include <string> 149 #include <tuple> 150 #include <utility> 151 #include <vector> 152 153 using namespace llvm; 154 155 #define LV_NAME "loop-vectorize" 156 #define DEBUG_TYPE LV_NAME 157 158 /// @{ 159 /// Metadata attribute names 160 static const char *const LLVMLoopVectorizeFollowupAll = 161 "llvm.loop.vectorize.followup_all"; 162 static const char *const LLVMLoopVectorizeFollowupVectorized = 163 "llvm.loop.vectorize.followup_vectorized"; 164 static const char *const LLVMLoopVectorizeFollowupEpilogue = 165 "llvm.loop.vectorize.followup_epilogue"; 166 /// @} 167 168 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 169 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 170 171 /// Loops with a known constant trip count below this number are vectorized only 172 /// if no scalar iteration overheads are incurred. 173 static cl::opt<unsigned> TinyTripCountVectorThreshold( 174 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 175 cl::desc("Loops with a constant trip count that is smaller than this " 176 "value are vectorized only if no scalar iteration overheads " 177 "are incurred.")); 178 179 static cl::opt<bool> MaximizeBandwidth( 180 "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, 181 cl::desc("Maximize bandwidth when selecting vectorization factor which " 182 "will be determined by the smallest type in loop.")); 183 184 static cl::opt<bool> EnableInterleavedMemAccesses( 185 "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden, 186 cl::desc("Enable vectorization on interleaved memory accesses in a loop")); 187 188 /// An interleave-group may need masking if it resides in a block that needs 189 /// predication, or in order to mask away gaps. 190 static cl::opt<bool> EnableMaskedInterleavedMemAccesses( 191 "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, 192 cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); 193 194 /// We don't interleave loops with a known constant trip count below this 195 /// number. 196 static const unsigned TinyTripCountInterleaveThreshold = 128; 197 198 static cl::opt<unsigned> ForceTargetNumScalarRegs( 199 "force-target-num-scalar-regs", cl::init(0), cl::Hidden, 200 cl::desc("A flag that overrides the target's number of scalar registers.")); 201 202 static cl::opt<unsigned> ForceTargetNumVectorRegs( 203 "force-target-num-vector-regs", cl::init(0), cl::Hidden, 204 cl::desc("A flag that overrides the target's number of vector registers.")); 205 206 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor( 207 "force-target-max-scalar-interleave", cl::init(0), cl::Hidden, 208 cl::desc("A flag that overrides the target's max interleave factor for " 209 "scalar loops.")); 210 211 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor( 212 "force-target-max-vector-interleave", cl::init(0), cl::Hidden, 213 cl::desc("A flag that overrides the target's max interleave factor for " 214 "vectorized loops.")); 215 216 static cl::opt<unsigned> ForceTargetInstructionCost( 217 "force-target-instruction-cost", cl::init(0), cl::Hidden, 218 cl::desc("A flag that overrides the target's expected cost for " 219 "an instruction to a single constant value. 
Mostly " 220 "useful for getting consistent testing.")); 221 222 static cl::opt<unsigned> SmallLoopCost( 223 "small-loop-cost", cl::init(20), cl::Hidden, 224 cl::desc( 225 "The cost of a loop that is considered 'small' by the interleaver.")); 226 227 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 228 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 229 cl::desc("Enable the use of the block frequency analysis to access PGO " 230 "heuristics minimizing code growth in cold regions and being more " 231 "aggressive in hot regions.")); 232 233 // Runtime interleave loops for load/store throughput. 234 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 235 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 236 cl::desc( 237 "Enable runtime interleaving until load/store ports are saturated")); 238 239 /// The number of stores in a loop that are allowed to need predication. 240 static cl::opt<unsigned> NumberOfStoresToPredicate( 241 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 242 cl::desc("Max number of stores to be predicated behind an if.")); 243 244 static cl::opt<bool> EnableIndVarRegisterHeur( 245 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 246 cl::desc("Count the induction variable only once when interleaving")); 247 248 static cl::opt<bool> EnableCondStoresVectorization( 249 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 250 cl::desc("Enable if predication of stores during vectorization.")); 251 252 static cl::opt<unsigned> MaxNestedScalarReductionIC( 253 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 254 cl::desc("The maximum interleave count to use when interleaving a scalar " 255 "reduction in a nested loop.")); 256 257 cl::opt<bool> EnableVPlanNativePath( 258 "enable-vplan-native-path", cl::init(false), cl::Hidden, 259 cl::desc("Enable VPlan-native vectorization path with " 260 "support for outer loop vectorization.")); 261 262 // FIXME: Remove this switch once we have divergence analysis. Currently we 263 // assume divergent non-backedge branches when this switch is true. 264 cl::opt<bool> EnableVPlanPredication( 265 "enable-vplan-predication", cl::init(false), cl::Hidden, 266 cl::desc("Enable VPlan-native vectorization path predicator with " 267 "support for outer loop vectorization.")); 268 269 // This flag enables the stress testing of the VPlan H-CFG construction in the 270 // VPlan-native vectorization path. It must be used in conjuction with 271 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the 272 // verification of the H-CFGs built. 273 static cl::opt<bool> VPlanBuildStressTest( 274 "vplan-build-stress-test", cl::init(false), cl::Hidden, 275 cl::desc( 276 "Build VPlan for every supported loop nest in the function and bail " 277 "out right after the build (stress test the VPlan H-CFG construction " 278 "in the VPlan-native vectorization path).")); 279 280 /// A helper function for converting Scalar types to vector types. 281 /// If the incoming type is void, we return void. If the VF is 1, we return 282 /// the scalar type. 283 static Type *ToVectorTy(Type *Scalar, unsigned VF) { 284 if (Scalar->isVoidTy() || VF == 1) 285 return Scalar; 286 return VectorType::get(Scalar, VF); 287 } 288 289 /// A helper function that returns the type of loaded or stored value. 
290 static Type *getMemInstValueType(Value *I) { 291 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 292 "Expected Load or Store instruction"); 293 if (auto *LI = dyn_cast<LoadInst>(I)) 294 return LI->getType(); 295 return cast<StoreInst>(I)->getValueOperand()->getType(); 296 } 297 298 /// A helper function that returns true if the given type is irregular. The 299 /// type is irregular if its allocated size doesn't equal the store size of an 300 /// element of the corresponding vector type at the given vectorization factor. 301 static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) { 302 // Determine if an array of VF elements of type Ty is "bitcast compatible" 303 // with a <VF x Ty> vector. 304 if (VF > 1) { 305 auto *VectorTy = VectorType::get(Ty, VF); 306 return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy); 307 } 308 309 // If the vectorization factor is one, we just check if an array of type Ty 310 // requires padding between elements. 311 return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty); 312 } 313 314 /// A helper function that returns the reciprocal of the block probability of 315 /// predicated blocks. If we return X, we are assuming the predicated block 316 /// will execute once for every X iterations of the loop header. 317 /// 318 /// TODO: We should use actual block probability here, if available. Currently, 319 /// we always assume predicated blocks have a 50% chance of executing. 320 static unsigned getReciprocalPredBlockProb() { return 2; } 321 322 /// A helper function that adds a 'fast' flag to floating-point operations. 323 static Value *addFastMathFlag(Value *V) { 324 if (isa<FPMathOperator>(V)) 325 cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast()); 326 return V; 327 } 328 329 static Value *addFastMathFlag(Value *V, FastMathFlags FMF) { 330 if (isa<FPMathOperator>(V)) 331 cast<Instruction>(V)->setFastMathFlags(FMF); 332 return V; 333 } 334 335 /// A helper function that returns an integer or floating-point constant with 336 /// value C. 337 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) { 338 return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C) 339 : ConstantFP::get(Ty, C); 340 } 341 342 namespace llvm { 343 344 /// InnerLoopVectorizer vectorizes loops which contain only one basic 345 /// block to a specified vectorization factor (VF). 346 /// This class performs the widening of scalars into vectors, or multiple 347 /// scalars. This class also implements the following features: 348 /// * It inserts an epilogue loop for handling loops that don't have iteration 349 /// counts that are known to be a multiple of the vectorization factor. 350 /// * It handles the code generation for reduction variables. 351 /// * Scalarization (implementation using scalars) of un-vectorizable 352 /// instructions. 353 /// InnerLoopVectorizer does not perform any vectorization-legality 354 /// checks, and relies on the caller to check for the different legality 355 /// aspects. The InnerLoopVectorizer relies on the 356 /// LoopVectorizationLegality class to provide information about the induction 357 /// and reduction variables that were found to a given vectorization factor. 
358 class InnerLoopVectorizer { 359 public: 360 InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 361 LoopInfo *LI, DominatorTree *DT, 362 const TargetLibraryInfo *TLI, 363 const TargetTransformInfo *TTI, AssumptionCache *AC, 364 OptimizationRemarkEmitter *ORE, unsigned VecWidth, 365 unsigned UnrollFactor, LoopVectorizationLegality *LVL, 366 LoopVectorizationCostModel *CM) 367 : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), 368 AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor), 369 Builder(PSE.getSE()->getContext()), 370 VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {} 371 virtual ~InnerLoopVectorizer() = default; 372 373 /// Create a new empty loop. Unlink the old loop and connect the new one. 374 /// Return the pre-header block of the new loop. 375 BasicBlock *createVectorizedLoopSkeleton(); 376 377 /// Widen a single instruction within the innermost loop. 378 void widenInstruction(Instruction &I); 379 380 /// Fix the vectorized code, taking care of header phi's, live-outs, and more. 381 void fixVectorizedLoop(); 382 383 // Return true if any runtime check is added. 384 bool areSafetyChecksAdded() { return AddedSafetyChecks; } 385 386 /// A type for vectorized values in the new loop. Each value from the 387 /// original loop, when vectorized, is represented by UF vector values in the 388 /// new unrolled loop, where UF is the unroll factor. 389 using VectorParts = SmallVector<Value *, 2>; 390 391 /// Vectorize a single PHINode in a block. This method handles the induction 392 /// variable canonicalization. It supports both VF = 1 for unrolled loops and 393 /// arbitrary length vectors. 394 void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF); 395 396 /// A helper function to scalarize a single Instruction in the innermost loop. 397 /// Generates a sequence of scalar instances for each lane between \p MinLane 398 /// and \p MaxLane, times each part between \p MinPart and \p MaxPart, 399 /// inclusive.. 400 void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance, 401 bool IfPredicateInstr); 402 403 /// Widen an integer or floating-point induction variable \p IV. If \p Trunc 404 /// is provided, the integer induction variable will first be truncated to 405 /// the corresponding type. 406 void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr); 407 408 /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a 409 /// vector or scalar value on-demand if one is not yet available. When 410 /// vectorizing a loop, we visit the definition of an instruction before its 411 /// uses. When visiting the definition, we either vectorize or scalarize the 412 /// instruction, creating an entry for it in the corresponding map. (In some 413 /// cases, such as induction variables, we will create both vector and scalar 414 /// entries.) Then, as we encounter uses of the definition, we derive values 415 /// for each scalar or vector use unless such a value is already available. 416 /// For example, if we scalarize a definition and one of its uses is vector, 417 /// we build the required vector on-demand with an insertelement sequence 418 /// when visiting the use. Otherwise, if the use is scalar, we can use the 419 /// existing scalar definition. 420 /// 421 /// Return a value in the new loop corresponding to \p V from the original 422 /// loop at unroll index \p Part. If the value has already been vectorized, 423 /// the corresponding vector entry in VectorLoopValueMap is returned. 
If, 424 /// however, the value has a scalar entry in VectorLoopValueMap, we construct 425 /// a new vector value on-demand by inserting the scalar values into a vector 426 /// with an insertelement sequence. If the value has been neither vectorized 427 /// nor scalarized, it must be loop invariant, so we simply broadcast the 428 /// value into a vector. 429 Value *getOrCreateVectorValue(Value *V, unsigned Part); 430 431 /// Return a value in the new loop corresponding to \p V from the original 432 /// loop at unroll and vector indices \p Instance. If the value has been 433 /// vectorized but not scalarized, the necessary extractelement instruction 434 /// will be generated. 435 Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance); 436 437 /// Construct the vector value of a scalarized value \p V one lane at a time. 438 void packScalarIntoVectorValue(Value *V, const VPIteration &Instance); 439 440 /// Try to vectorize the interleaved access group that \p Instr belongs to, 441 /// optionally masking the vector operations if \p BlockInMask is non-null. 442 void vectorizeInterleaveGroup(Instruction *Instr, 443 VectorParts *BlockInMask = nullptr); 444 445 /// Vectorize Load and Store instructions, optionally masking the vector 446 /// operations if \p BlockInMask is non-null. 447 void vectorizeMemoryInstruction(Instruction *Instr, 448 VectorParts *BlockInMask = nullptr); 449 450 /// Set the debug location in the builder using the debug location in 451 /// the instruction. 452 void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr); 453 454 /// Fix the non-induction PHIs in the OrigPHIsToFix vector. 455 void fixNonInductionPHIs(void); 456 457 protected: 458 friend class LoopVectorizationPlanner; 459 460 /// A small list of PHINodes. 461 using PhiVector = SmallVector<PHINode *, 4>; 462 463 /// A type for scalarized values in the new loop. Each value from the 464 /// original loop, when scalarized, is represented by UF x VF scalar values 465 /// in the new unrolled loop, where UF is the unroll factor and VF is the 466 /// vectorization factor. 467 using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>; 468 469 /// Set up the values of the IVs correctly when exiting the vector loop. 470 void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, 471 Value *CountRoundDown, Value *EndValue, 472 BasicBlock *MiddleBlock); 473 474 /// Create a new induction variable inside L. 475 PHINode *createInductionVariable(Loop *L, Value *Start, Value *End, 476 Value *Step, Instruction *DL); 477 478 /// Handle all cross-iteration phis in the header. 479 void fixCrossIterationPHIs(); 480 481 /// Fix a first-order recurrence. This is the second phase of vectorizing 482 /// this phi node. 483 void fixFirstOrderRecurrence(PHINode *Phi); 484 485 /// Fix a reduction cross-iteration phi. This is the second phase of 486 /// vectorizing this phi node. 487 void fixReduction(PHINode *Phi); 488 489 /// The Loop exit block may have single value PHI nodes with some 490 /// incoming value. While vectorizing we only handled real values 491 /// that were defined inside the loop and we should have one value for 492 /// each predecessor of its parent basic block. See PR14725. 493 void fixLCSSAPHIs(); 494 495 /// Iteratively sink the scalarized operands of a predicated instruction into 496 /// the block that was created for it. 497 void sinkScalarOperands(Instruction *PredInst); 498 499 /// Shrinks vector element sizes to the smallest bitwidth they can be legally 500 /// represented as. 
501 void truncateToMinimalBitwidths(); 502 503 /// Insert the new loop to the loop hierarchy and pass manager 504 /// and update the analysis passes. 505 void updateAnalysis(); 506 507 /// Create a broadcast instruction. This method generates a broadcast 508 /// instruction (shuffle) for loop invariant values and for the induction 509 /// value. If this is the induction variable then we extend it to N, N+1, ... 510 /// this is needed because each iteration in the loop corresponds to a SIMD 511 /// element. 512 virtual Value *getBroadcastInstrs(Value *V); 513 514 /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...) 515 /// to each vector element of Val. The sequence starts at StartIndex. 516 /// \p Opcode is relevant for FP induction variable. 517 virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step, 518 Instruction::BinaryOps Opcode = 519 Instruction::BinaryOpsEnd); 520 521 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 522 /// variable on which to base the steps, \p Step is the size of the step, and 523 /// \p EntryVal is the value from the original loop that maps to the steps. 524 /// Note that \p EntryVal doesn't have to be an induction variable - it 525 /// can also be a truncate instruction. 526 void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal, 527 const InductionDescriptor &ID); 528 529 /// Create a vector induction phi node based on an existing scalar one. \p 530 /// EntryVal is the value from the original loop that maps to the vector phi 531 /// node, and \p Step is the loop-invariant step. If \p EntryVal is a 532 /// truncate instruction, instead of widening the original IV, we widen a 533 /// version of the IV truncated to \p EntryVal's type. 534 void createVectorIntOrFpInductionPHI(const InductionDescriptor &II, 535 Value *Step, Instruction *EntryVal); 536 537 /// Returns true if an instruction \p I should be scalarized instead of 538 /// vectorized for the chosen vectorization factor. 539 bool shouldScalarizeInstruction(Instruction *I) const; 540 541 /// Returns true if we should generate a scalar version of \p IV. 542 bool needsScalarInduction(Instruction *IV) const; 543 544 /// If there is a cast involved in the induction variable \p ID, which should 545 /// be ignored in the vectorized loop body, this function records the 546 /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the 547 /// cast. We had already proved that the casted Phi is equal to the uncasted 548 /// Phi in the vectorized loop (under a runtime guard), and therefore 549 /// there is no need to vectorize the cast - the same value can be used in the 550 /// vector loop for both the Phi and the cast. 551 /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified, 552 /// Otherwise, \p VectorLoopValue is a widened/vectorized value. 553 /// 554 /// \p EntryVal is the value from the original loop that maps to the vector 555 /// phi node and is used to distinguish what is the IV currently being 556 /// processed - original one (if \p EntryVal is a phi corresponding to the 557 /// original IV) or the "newly-created" one based on the proof mentioned above 558 /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the 559 /// latter case \p EntryVal is a TruncInst and we must not record anything for 560 /// that IV, but it's error-prone to expect callers of this routine to care 561 /// about that, hence this explicit parameter. 
562 void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID, 563 const Instruction *EntryVal, 564 Value *VectorLoopValue, 565 unsigned Part, 566 unsigned Lane = UINT_MAX); 567 568 /// Generate a shuffle sequence that will reverse the vector Vec. 569 virtual Value *reverseVector(Value *Vec); 570 571 /// Returns (and creates if needed) the original loop trip count. 572 Value *getOrCreateTripCount(Loop *NewLoop); 573 574 /// Returns (and creates if needed) the trip count of the widened loop. 575 Value *getOrCreateVectorTripCount(Loop *NewLoop); 576 577 /// Returns a bitcasted value to the requested vector type. 578 /// Also handles bitcasts of vector<float> <-> vector<pointer> types. 579 Value *createBitOrPointerCast(Value *V, VectorType *DstVTy, 580 const DataLayout &DL); 581 582 /// Emit a bypass check to see if the vector trip count is zero, including if 583 /// it overflows. 584 void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass); 585 586 /// Emit a bypass check to see if all of the SCEV assumptions we've 587 /// had to make are correct. 588 void emitSCEVChecks(Loop *L, BasicBlock *Bypass); 589 590 /// Emit bypass checks to check any memory assumptions we may have made. 591 void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass); 592 593 /// Compute the transformed value of Index at offset StartValue using step 594 /// StepValue. 595 /// For integer induction, returns StartValue + Index * StepValue. 596 /// For pointer induction, returns StartValue[Index * StepValue]. 597 /// FIXME: The newly created binary instructions should contain nsw/nuw 598 /// flags, which can be found from the original scalar operations. 599 Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE, 600 const DataLayout &DL, 601 const InductionDescriptor &ID) const; 602 603 /// Add additional metadata to \p To that was not present on \p Orig. 604 /// 605 /// Currently this is used to add the noalias annotations based on the 606 /// inserted memchecks. Use this for instructions that are *cloned* into the 607 /// vector loop. 608 void addNewMetadata(Instruction *To, const Instruction *Orig); 609 610 /// Add metadata from one instruction to another. 611 /// 612 /// This includes both the original MDs from \p From and additional ones (\see 613 /// addNewMetadata). Use this for *newly created* instructions in the vector 614 /// loop. 615 void addMetadata(Instruction *To, Instruction *From); 616 617 /// Similar to the previous function but it adds the metadata to a 618 /// vector of instructions. 619 void addMetadata(ArrayRef<Value *> To, Instruction *From); 620 621 /// The original loop. 622 Loop *OrigLoop; 623 624 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies 625 /// dynamic knowledge to simplify SCEV expressions and converts them to a 626 /// more usable form. 627 PredicatedScalarEvolution &PSE; 628 629 /// Loop Info. 630 LoopInfo *LI; 631 632 /// Dominator Tree. 633 DominatorTree *DT; 634 635 /// Alias Analysis. 636 AliasAnalysis *AA; 637 638 /// Target Library Info. 639 const TargetLibraryInfo *TLI; 640 641 /// Target Transform Info. 642 const TargetTransformInfo *TTI; 643 644 /// Assumption Cache. 645 AssumptionCache *AC; 646 647 /// Interface to emit optimization remarks. 648 OptimizationRemarkEmitter *ORE; 649 650 /// LoopVersioning. It's only set up (non-null) if memchecks were 651 /// used. 652 /// 653 /// This is currently only used to add no-alias metadata based on the 654 /// memchecks. 
The actually versioning is performed manually. 655 std::unique_ptr<LoopVersioning> LVer; 656 657 /// The vectorization SIMD factor to use. Each vector will have this many 658 /// vector elements. 659 unsigned VF; 660 661 /// The vectorization unroll factor to use. Each scalar is vectorized to this 662 /// many different vector instructions. 663 unsigned UF; 664 665 /// The builder that we use 666 IRBuilder<> Builder; 667 668 // --- Vectorization state --- 669 670 /// The vector-loop preheader. 671 BasicBlock *LoopVectorPreHeader; 672 673 /// The scalar-loop preheader. 674 BasicBlock *LoopScalarPreHeader; 675 676 /// Middle Block between the vector and the scalar. 677 BasicBlock *LoopMiddleBlock; 678 679 /// The ExitBlock of the scalar loop. 680 BasicBlock *LoopExitBlock; 681 682 /// The vector loop body. 683 BasicBlock *LoopVectorBody; 684 685 /// The scalar loop body. 686 BasicBlock *LoopScalarBody; 687 688 /// A list of all bypass blocks. The first block is the entry of the loop. 689 SmallVector<BasicBlock *, 4> LoopBypassBlocks; 690 691 /// The new Induction variable which was added to the new block. 692 PHINode *Induction = nullptr; 693 694 /// The induction variable of the old basic block. 695 PHINode *OldInduction = nullptr; 696 697 /// Maps values from the original loop to their corresponding values in the 698 /// vectorized loop. A key value can map to either vector values, scalar 699 /// values or both kinds of values, depending on whether the key was 700 /// vectorized and scalarized. 701 VectorizerValueMap VectorLoopValueMap; 702 703 /// Store instructions that were predicated. 704 SmallVector<Instruction *, 4> PredicatedInstructions; 705 706 /// Trip count of the original loop. 707 Value *TripCount = nullptr; 708 709 /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)) 710 Value *VectorTripCount = nullptr; 711 712 /// The legality analysis. 713 LoopVectorizationLegality *Legal; 714 715 /// The profitablity analysis. 716 LoopVectorizationCostModel *Cost; 717 718 // Record whether runtime checks are added. 719 bool AddedSafetyChecks = false; 720 721 // Holds the end values for each induction variable. We save the end values 722 // so we can later fix-up the external users of the induction variables. 723 DenseMap<PHINode *, Value *> IVEndValues; 724 725 // Vector of original scalar PHIs whose corresponding widened PHIs need to be 726 // fixed up at the end of vector code generation. 727 SmallVector<PHINode *, 8> OrigPHIsToFix; 728 }; 729 730 class InnerLoopUnroller : public InnerLoopVectorizer { 731 public: 732 InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE, 733 LoopInfo *LI, DominatorTree *DT, 734 const TargetLibraryInfo *TLI, 735 const TargetTransformInfo *TTI, AssumptionCache *AC, 736 OptimizationRemarkEmitter *ORE, unsigned UnrollFactor, 737 LoopVectorizationLegality *LVL, 738 LoopVectorizationCostModel *CM) 739 : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1, 740 UnrollFactor, LVL, CM) {} 741 742 private: 743 Value *getBroadcastInstrs(Value *V) override; 744 Value *getStepVector(Value *Val, int StartIdx, Value *Step, 745 Instruction::BinaryOps Opcode = 746 Instruction::BinaryOpsEnd) override; 747 Value *reverseVector(Value *Vec) override; 748 }; 749 750 } // end namespace llvm 751 752 /// Look for a meaningful debug location on the instruction or it's 753 /// operands. 
754 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) { 755 if (!I) 756 return I; 757 758 DebugLoc Empty; 759 if (I->getDebugLoc() != Empty) 760 return I; 761 762 for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) { 763 if (Instruction *OpInst = dyn_cast<Instruction>(*OI)) 764 if (OpInst->getDebugLoc() != Empty) 765 return OpInst; 766 } 767 768 return I; 769 } 770 771 void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) { 772 if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) { 773 const DILocation *DIL = Inst->getDebugLoc(); 774 if (DIL && Inst->getFunction()->isDebugInfoForProfiling() && 775 !isa<DbgInfoIntrinsic>(Inst)) { 776 auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF); 777 if (NewDIL) 778 B.SetCurrentDebugLocation(NewDIL.getValue()); 779 else 780 LLVM_DEBUG(dbgs() 781 << "Failed to create new discriminator: " 782 << DIL->getFilename() << " Line: " << DIL->getLine()); 783 } 784 else 785 B.SetCurrentDebugLocation(DIL); 786 } else 787 B.SetCurrentDebugLocation(DebugLoc()); 788 } 789 790 #ifndef NDEBUG 791 /// \return string containing a file name and a line # for the given loop. 792 static std::string getDebugLocString(const Loop *L) { 793 std::string Result; 794 if (L) { 795 raw_string_ostream OS(Result); 796 if (const DebugLoc LoopDbgLoc = L->getStartLoc()) 797 LoopDbgLoc.print(OS); 798 else 799 // Just print the module name. 800 OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier(); 801 OS.flush(); 802 } 803 return Result; 804 } 805 #endif 806 807 void InnerLoopVectorizer::addNewMetadata(Instruction *To, 808 const Instruction *Orig) { 809 // If the loop was versioned with memchecks, add the corresponding no-alias 810 // metadata. 811 if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig))) 812 LVer->annotateInstWithNoAlias(To, Orig); 813 } 814 815 void InnerLoopVectorizer::addMetadata(Instruction *To, 816 Instruction *From) { 817 propagateMetadata(To, From); 818 addNewMetadata(To, From); 819 } 820 821 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To, 822 Instruction *From) { 823 for (Value *V : To) { 824 if (Instruction *I = dyn_cast<Instruction>(V)) 825 addMetadata(I, From); 826 } 827 } 828 829 namespace llvm { 830 831 /// LoopVectorizationCostModel - estimates the expected speedups due to 832 /// vectorization. 833 /// In many cases vectorization is not profitable. This can happen because of 834 /// a number of reasons. In this class we mainly attempt to predict the 835 /// expected speedup/slowdowns due to the supported instruction set. We use the 836 /// TargetTransformInfo to query the different backends for the cost of 837 /// different operations. 838 class LoopVectorizationCostModel { 839 public: 840 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 841 LoopInfo *LI, LoopVectorizationLegality *Legal, 842 const TargetTransformInfo &TTI, 843 const TargetLibraryInfo *TLI, DemandedBits *DB, 844 AssumptionCache *AC, 845 OptimizationRemarkEmitter *ORE, const Function *F, 846 const LoopVectorizeHints *Hints, 847 InterleavedAccessInfo &IAI) 848 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 849 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints), InterleaveInfo(IAI) {} 850 851 /// \return An upper bound for the vectorization factor, or None if 852 /// vectorization and interleaving should be avoided up front. 
853 Optional<unsigned> computeMaxVF(bool OptForSize); 854 855 /// \return The most profitable vectorization factor and the cost of that VF. 856 /// This method checks every power of two up to MaxVF. If UserVF is not ZERO 857 /// then this vectorization factor will be selected if vectorization is 858 /// possible. 859 VectorizationFactor selectVectorizationFactor(unsigned MaxVF); 860 861 /// Setup cost-based decisions for user vectorization factor. 862 void selectUserVectorizationFactor(unsigned UserVF) { 863 collectUniformsAndScalars(UserVF); 864 collectInstsToScalarize(UserVF); 865 } 866 867 /// \return The size (in bits) of the smallest and widest types in the code 868 /// that needs to be vectorized. We ignore values that remain scalar such as 869 /// 64 bit loop indices. 870 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 871 872 /// \return The desired interleave count. 873 /// If interleave count has been specified by metadata it will be returned. 874 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 875 /// are the selected vectorization factor and the cost of the selected VF. 876 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 877 unsigned LoopCost); 878 879 /// Memory access instruction may be vectorized in more than one way. 880 /// Form of instruction after vectorization depends on cost. 881 /// This function takes cost-based decisions for Load/Store instructions 882 /// and collects them in a map. This decisions map is used for building 883 /// the lists of loop-uniform and loop-scalar instructions. 884 /// The calculated cost is saved with widening decision in order to 885 /// avoid redundant calculations. 886 void setCostBasedWideningDecision(unsigned VF); 887 888 /// A struct that represents some properties of the register usage 889 /// of a loop. 890 struct RegisterUsage { 891 /// Holds the number of loop invariant values that are used in the loop. 892 unsigned LoopInvariantRegs; 893 894 /// Holds the maximum number of concurrent live intervals in the loop. 895 unsigned MaxLocalUsers; 896 }; 897 898 /// \return Returns information about the register usages of the loop for the 899 /// given vectorization factors. 900 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 901 902 /// Collect values we want to ignore in the cost model. 903 void collectValuesToIgnore(); 904 905 /// \returns The smallest bitwidth each instruction can be represented with. 906 /// The vector equivalents of these instructions should be truncated to this 907 /// type. 908 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 909 return MinBWs; 910 } 911 912 /// \returns True if it is more profitable to scalarize instruction \p I for 913 /// vectorization factor \p VF. 914 bool isProfitableToScalarize(Instruction *I, unsigned VF) const { 915 assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1."); 916 917 // Cost model is not run in the VPlan-native path - return conservative 918 // result until this changes. 919 if (EnableVPlanNativePath) 920 return false; 921 922 auto Scalars = InstsToScalarize.find(VF); 923 assert(Scalars != InstsToScalarize.end() && 924 "VF not yet analyzed for scalarization profitability"); 925 return Scalars->second.find(I) != Scalars->second.end(); 926 } 927 928 /// Returns true if \p I is known to be uniform after vectorization. 
929 bool isUniformAfterVectorization(Instruction *I, unsigned VF) const { 930 if (VF == 1) 931 return true; 932 933 // Cost model is not run in the VPlan-native path - return conservative 934 // result until this changes. 935 if (EnableVPlanNativePath) 936 return false; 937 938 auto UniformsPerVF = Uniforms.find(VF); 939 assert(UniformsPerVF != Uniforms.end() && 940 "VF not yet analyzed for uniformity"); 941 return UniformsPerVF->second.find(I) != UniformsPerVF->second.end(); 942 } 943 944 /// Returns true if \p I is known to be scalar after vectorization. 945 bool isScalarAfterVectorization(Instruction *I, unsigned VF) const { 946 if (VF == 1) 947 return true; 948 949 // Cost model is not run in the VPlan-native path - return conservative 950 // result until this changes. 951 if (EnableVPlanNativePath) 952 return false; 953 954 auto ScalarsPerVF = Scalars.find(VF); 955 assert(ScalarsPerVF != Scalars.end() && 956 "Scalar values are not calculated for VF"); 957 return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end(); 958 } 959 960 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 961 /// for vectorization factor \p VF. 962 bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const { 963 return VF > 1 && MinBWs.find(I) != MinBWs.end() && 964 !isProfitableToScalarize(I, VF) && 965 !isScalarAfterVectorization(I, VF); 966 } 967 968 /// Decision that was taken during cost calculation for memory instruction. 969 enum InstWidening { 970 CM_Unknown, 971 CM_Widen, // For consecutive accesses with stride +1. 972 CM_Widen_Reverse, // For consecutive accesses with stride -1. 973 CM_Interleave, 974 CM_GatherScatter, 975 CM_Scalarize 976 }; 977 978 /// Save vectorization decision \p W and \p Cost taken by the cost model for 979 /// instruction \p I and vector width \p VF. 980 void setWideningDecision(Instruction *I, unsigned VF, InstWidening W, 981 unsigned Cost) { 982 assert(VF >= 2 && "Expected VF >=2"); 983 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 984 } 985 986 /// Save vectorization decision \p W and \p Cost taken by the cost model for 987 /// interleaving group \p Grp and vector width \p VF. 988 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF, 989 InstWidening W, unsigned Cost) { 990 assert(VF >= 2 && "Expected VF >=2"); 991 /// Broadcast this decicion to all instructions inside the group. 992 /// But the cost will be assigned to one instruction only. 993 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 994 if (auto *I = Grp->getMember(i)) { 995 if (Grp->getInsertPos() == I) 996 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 997 else 998 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 999 } 1000 } 1001 } 1002 1003 /// Return the cost model decision for the given instruction \p I and vector 1004 /// width \p VF. Return CM_Unknown if this instruction did not pass 1005 /// through the cost modeling. 1006 InstWidening getWideningDecision(Instruction *I, unsigned VF) { 1007 assert(VF >= 2 && "Expected VF >=2"); 1008 1009 // Cost model is not run in the VPlan-native path - return conservative 1010 // result until this changes. 
1011 if (EnableVPlanNativePath) 1012 return CM_GatherScatter; 1013 1014 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1015 auto Itr = WideningDecisions.find(InstOnVF); 1016 if (Itr == WideningDecisions.end()) 1017 return CM_Unknown; 1018 return Itr->second.first; 1019 } 1020 1021 /// Return the vectorization cost for the given instruction \p I and vector 1022 /// width \p VF. 1023 unsigned getWideningCost(Instruction *I, unsigned VF) { 1024 assert(VF >= 2 && "Expected VF >=2"); 1025 std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF); 1026 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1027 "The cost is not calculated"); 1028 return WideningDecisions[InstOnVF].second; 1029 } 1030 1031 /// Return True if instruction \p I is an optimizable truncate whose operand 1032 /// is an induction variable. Such a truncate will be removed by adding a new 1033 /// induction variable with the destination type. 1034 bool isOptimizableIVTruncate(Instruction *I, unsigned VF) { 1035 // If the instruction is not a truncate, return false. 1036 auto *Trunc = dyn_cast<TruncInst>(I); 1037 if (!Trunc) 1038 return false; 1039 1040 // Get the source and destination types of the truncate. 1041 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1042 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1043 1044 // If the truncate is free for the given types, return false. Replacing a 1045 // free truncate with an induction variable would add an induction variable 1046 // update instruction to each iteration of the loop. We exclude from this 1047 // check the primary induction variable since it will need an update 1048 // instruction regardless. 1049 Value *Op = Trunc->getOperand(0); 1050 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1051 return false; 1052 1053 // If the truncated value is not an induction variable, return false. 1054 return Legal->isInductionPhi(Op); 1055 } 1056 1057 /// Collects the instructions to scalarize for each predicated instruction in 1058 /// the loop. 1059 void collectInstsToScalarize(unsigned VF); 1060 1061 /// Collect Uniform and Scalar values for the given \p VF. 1062 /// The sets depend on CM decision for Load/Store instructions 1063 /// that may be vectorized as interleave, gather-scatter or scalarized. 1064 void collectUniformsAndScalars(unsigned VF) { 1065 // Do the analysis once. 1066 if (VF == 1 || Uniforms.find(VF) != Uniforms.end()) 1067 return; 1068 setCostBasedWideningDecision(VF); 1069 collectLoopUniforms(VF); 1070 collectLoopScalars(VF); 1071 } 1072 1073 /// Returns true if the target machine supports masked store operation 1074 /// for the given \p DataType and kind of access to \p Ptr. 1075 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1076 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType); 1077 } 1078 1079 /// Returns true if the target machine supports masked load operation 1080 /// for the given \p DataType and kind of access to \p Ptr. 1081 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1082 return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType); 1083 } 1084 1085 /// Returns true if the target machine supports masked scatter operation 1086 /// for the given \p DataType. 1087 bool isLegalMaskedScatter(Type *DataType) { 1088 return TTI.isLegalMaskedScatter(DataType); 1089 } 1090 1091 /// Returns true if the target machine supports masked gather operation 1092 /// for the given \p DataType. 
1093 bool isLegalMaskedGather(Type *DataType) { 1094 return TTI.isLegalMaskedGather(DataType); 1095 } 1096 1097 /// Returns true if the target machine can represent \p V as a masked gather 1098 /// or scatter operation. 1099 bool isLegalGatherOrScatter(Value *V) { 1100 bool LI = isa<LoadInst>(V); 1101 bool SI = isa<StoreInst>(V); 1102 if (!LI && !SI) 1103 return false; 1104 auto *Ty = getMemInstValueType(V); 1105 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1106 } 1107 1108 /// Returns true if \p I is an instruction that will be scalarized with 1109 /// predication. Such instructions include conditional stores and 1110 /// instructions that may divide by zero. 1111 /// If a non-zero VF has been calculated, we check if I will be scalarized 1112 /// predication for that VF. 1113 bool isScalarWithPredication(Instruction *I, unsigned VF = 1); 1114 1115 // Returns true if \p I is an instruction that will be predicated either 1116 // through scalar predication or masked load/store or masked gather/scatter. 1117 // Superset of instructions that return true for isScalarWithPredication. 1118 bool isPredicatedInst(Instruction *I) { 1119 if (!blockNeedsPredication(I->getParent())) 1120 return false; 1121 // Loads and stores that need some form of masked operation are predicated 1122 // instructions. 1123 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1124 return Legal->isMaskRequired(I); 1125 return isScalarWithPredication(I); 1126 } 1127 1128 /// Returns true if \p I is a memory instruction with consecutive memory 1129 /// access that can be widened. 1130 bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1); 1131 1132 /// Returns true if \p I is a memory instruction in an interleaved-group 1133 /// of memory accesses that can be vectorized with wide vector loads/stores 1134 /// and shuffles. 1135 bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1); 1136 1137 /// Check if \p Instr belongs to any interleaved access group. 1138 bool isAccessInterleaved(Instruction *Instr) { 1139 return InterleaveInfo.isInterleaved(Instr); 1140 } 1141 1142 /// Get the interleaved access group that \p Instr belongs to. 1143 const InterleaveGroup<Instruction> * 1144 getInterleavedAccessGroup(Instruction *Instr) { 1145 return InterleaveInfo.getInterleaveGroup(Instr); 1146 } 1147 1148 /// Returns true if an interleaved group requires a scalar iteration 1149 /// to handle accesses with gaps, and there is nothing preventing us from 1150 /// creating a scalar epilogue. 1151 bool requiresScalarEpilogue() const { 1152 return IsScalarEpilogueAllowed && InterleaveInfo.requiresScalarEpilogue(); 1153 } 1154 1155 /// Returns true if a scalar epilogue is not allowed due to optsize. 1156 bool isScalarEpilogueAllowed() const { return IsScalarEpilogueAllowed; } 1157 1158 /// Returns true if all loop blocks should be masked to fold tail loop. 1159 bool foldTailByMasking() const { return FoldTailByMasking; } 1160 1161 bool blockNeedsPredication(BasicBlock *BB) { 1162 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1163 } 1164 1165 private: 1166 unsigned NumPredStores = 0; 1167 1168 /// \return An upper bound for the vectorization factor, larger than zero. 1169 /// One is returned if vectorization should best be avoided due to cost. 
1170 unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount); 1171 1172 /// The vectorization cost is a combination of the cost itself and a boolean 1173 /// indicating whether any of the contributing operations will actually 1174 /// operate on 1175 /// vector values after type legalization in the backend. If this latter value 1176 /// is 1177 /// false, then all operations will be scalarized (i.e. no vectorization has 1178 /// actually taken place). 1179 using VectorizationCostTy = std::pair<unsigned, bool>; 1180 1181 /// Returns the expected execution cost. The unit of the cost does 1182 /// not matter because we use the 'cost' units to compare different 1183 /// vector widths. The cost that is returned is *not* normalized by 1184 /// the factor width. 1185 VectorizationCostTy expectedCost(unsigned VF); 1186 1187 /// Returns the execution time cost of an instruction for a given vector 1188 /// width. Vector width of one means scalar. 1189 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1190 1191 /// The cost-computation logic from getInstructionCost which provides 1192 /// the vector type as an output parameter. 1193 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1194 1195 /// Calculate vectorization cost of memory instruction \p I. 1196 unsigned getMemoryInstructionCost(Instruction *I, unsigned VF); 1197 1198 /// The cost computation for scalarized memory instruction. 1199 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF); 1200 1201 /// The cost computation for interleaving group of memory instructions. 1202 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF); 1203 1204 /// The cost computation for Gather/Scatter instruction. 1205 unsigned getGatherScatterCost(Instruction *I, unsigned VF); 1206 1207 /// The cost computation for widening instruction \p I with consecutive 1208 /// memory access. 1209 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF); 1210 1211 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1212 /// Load: scalar load + broadcast. 1213 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1214 /// element) 1215 unsigned getUniformMemOpCost(Instruction *I, unsigned VF); 1216 1217 /// Returns whether the instruction is a load or store and will be a emitted 1218 /// as a vector operation. 1219 bool isConsecutiveLoadOrStore(Instruction *I); 1220 1221 /// Returns true if an artificially high cost for emulated masked memrefs 1222 /// should be used. 1223 bool useEmulatedMaskMemRefHack(Instruction *I); 1224 1225 /// Create an analysis remark that explains why vectorization failed 1226 /// 1227 /// \p RemarkName is the identifier for the remark. \return the remark object 1228 /// that can be streamed to. 1229 OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) { 1230 return createLVMissedAnalysis(Hints->vectorizeAnalysisPassName(), 1231 RemarkName, TheLoop); 1232 } 1233 1234 /// Map of scalar integer values to the smallest bitwidth they can be legally 1235 /// represented as. The vector equivalents of these values should be truncated 1236 /// to this type. 1237 MapVector<Instruction *, uint64_t> MinBWs; 1238 1239 /// A type representing the costs for instructions if they were to be 1240 /// scalarized rather than vectorized. The entries are Instruction-Cost 1241 /// pairs. 
1242 using ScalarCostsTy = DenseMap<Instruction *, unsigned>; 1243 1244 /// A set containing all BasicBlocks that are known to present after 1245 /// vectorization as a predicated block. 1246 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1247 1248 /// Records whether it is allowed to have the original scalar loop execute at 1249 /// least once. This may be needed as a fallback loop in case runtime 1250 /// aliasing/dependence checks fail, or to handle the tail/remainder 1251 /// iterations when the trip count is unknown or doesn't divide by the VF, 1252 /// or as a peel-loop to handle gaps in interleave-groups. 1253 /// Under optsize and when the trip count is very small we don't allow any 1254 /// iterations to execute in the scalar loop. 1255 bool IsScalarEpilogueAllowed = true; 1256 1257 /// All blocks of loop are to be masked to fold tail of scalar iterations. 1258 bool FoldTailByMasking = false; 1259 1260 /// A map holding scalar costs for different vectorization factors. The 1261 /// presence of a cost for an instruction in the mapping indicates that the 1262 /// instruction will be scalarized when vectorizing with the associated 1263 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1264 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize; 1265 1266 /// Holds the instructions known to be uniform after vectorization. 1267 /// The data is collected per VF. 1268 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms; 1269 1270 /// Holds the instructions known to be scalar after vectorization. 1271 /// The data is collected per VF. 1272 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1273 1274 /// Holds the instructions (address computations) that are forced to be 1275 /// scalarized. 1276 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1277 1278 /// Returns the expected difference in cost from scalarizing the expression 1279 /// feeding a predicated instruction \p PredInst. The instructions to 1280 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1281 /// non-negative return value implies the expression will be scalarized. 1282 /// Currently, only single-use chains are considered for scalarization. 1283 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1284 unsigned VF); 1285 1286 /// Collect the instructions that are uniform after vectorization. An 1287 /// instruction is uniform if we represent it with a single scalar value in 1288 /// the vectorized loop corresponding to each vector iteration. Examples of 1289 /// uniform instructions include pointer operands of consecutive or 1290 /// interleaved memory accesses. Note that although uniformity implies an 1291 /// instruction will be scalar, the reverse is not true. In general, a 1292 /// scalarized instruction will be represented by VF scalar values in the 1293 /// vectorized loop, each corresponding to an iteration of the original 1294 /// scalar loop. 1295 void collectLoopUniforms(unsigned VF); 1296 1297 /// Collect the instructions that are scalar after vectorization. An 1298 /// instruction is scalar if it is known to be uniform or will be scalarized 1299 /// during vectorization. Non-uniform scalarized instructions will be 1300 /// represented by VF values in the vectorized loop, each corresponding to an 1301 /// iteration of the original scalar loop. 1302 void collectLoopScalars(unsigned VF); 1303 1304 /// Keeps cost model vectorization decision and cost for instructions. 
1305 /// Right now it is used for memory instructions only. 1306 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1307 std::pair<InstWidening, unsigned>>; 1308 1309 DecisionList WideningDecisions; 1310 1311 public: 1312 /// The loop that we evaluate. 1313 Loop *TheLoop; 1314 1315 /// Predicated scalar evolution analysis. 1316 PredicatedScalarEvolution &PSE; 1317 1318 /// Loop Info analysis. 1319 LoopInfo *LI; 1320 1321 /// Vectorization legality. 1322 LoopVectorizationLegality *Legal; 1323 1324 /// Vector target information. 1325 const TargetTransformInfo &TTI; 1326 1327 /// Target Library Info. 1328 const TargetLibraryInfo *TLI; 1329 1330 /// Demanded bits analysis. 1331 DemandedBits *DB; 1332 1333 /// Assumption cache. 1334 AssumptionCache *AC; 1335 1336 /// Interface to emit optimization remarks. 1337 OptimizationRemarkEmitter *ORE; 1338 1339 const Function *TheFunction; 1340 1341 /// Loop Vectorize Hint. 1342 const LoopVectorizeHints *Hints; 1343 1344 /// The interleave access information contains groups of interleaved accesses 1345 /// with the same stride and close to each other. 1346 InterleavedAccessInfo &InterleaveInfo; 1347 1348 /// Values to ignore in the cost model. 1349 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1350 1351 /// Values to ignore in the cost model when VF > 1. 1352 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1353 }; 1354 1355 } // end namespace llvm 1356 1357 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1358 // vectorization. The loop needs to be annotated with #pragma omp simd 1359 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1360 // vector length information is not provided, vectorization is not considered 1361 // explicit. Interleave hints are not allowed either. These limitations will be 1362 // relaxed in the future. 1363 // Please, note that we are currently forced to abuse the pragma 'clang 1364 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1365 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1366 // provides *explicit vectorization hints* (LV can bypass legal checks and 1367 // assume that vectorization is legal). However, both hints are implemented 1368 // using the same metadata (llvm.loop.vectorize, processed by 1369 // LoopVectorizeHints). This will be fixed in the future when the native IR 1370 // representation for pragma 'omp simd' is introduced. 1371 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1372 OptimizationRemarkEmitter *ORE) { 1373 assert(!OuterLp->empty() && "This is not an outer loop"); 1374 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1375 1376 // Only outer loops with an explicit vectorization hint are supported. 1377 // Unannotated outer loops are ignored. 1378 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1379 return false; 1380 1381 Function *Fn = OuterLp->getHeader()->getParent(); 1382 if (!Hints.allowVectorization(Fn, OuterLp, 1383 true /*VectorizeOnlyWhenForced*/)) { 1384 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1385 return false; 1386 } 1387 1388 if (Hints.getInterleave() > 1) { 1389 // TODO: Interleave support is future work. 
1390 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1391 "outer loops.\n"); 1392 Hints.emitRemarkWithHints(); 1393 return false; 1394 } 1395 1396 return true; 1397 } 1398 1399 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1400 OptimizationRemarkEmitter *ORE, 1401 SmallVectorImpl<Loop *> &V) { 1402 // Collect inner loops and outer loops without irreducible control flow. For 1403 // now, only collect outer loops that have explicit vectorization hints. If we 1404 // are stress testing the VPlan H-CFG construction, we collect the outermost 1405 // loop of every loop nest. 1406 if (L.empty() || VPlanBuildStressTest || 1407 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1408 LoopBlocksRPO RPOT(&L); 1409 RPOT.perform(LI); 1410 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1411 V.push_back(&L); 1412 // TODO: Collect inner loops inside marked outer loops in case 1413 // vectorization fails for the outer loop. Do not invoke 1414 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1415 // already known to be reducible. We can use an inherited attribute for 1416 // that. 1417 return; 1418 } 1419 } 1420 for (Loop *InnerL : L) 1421 collectSupportedLoops(*InnerL, LI, ORE, V); 1422 } 1423 1424 namespace { 1425 1426 /// The LoopVectorize Pass. 1427 struct LoopVectorize : public FunctionPass { 1428 /// Pass identification, replacement for typeid 1429 static char ID; 1430 1431 LoopVectorizePass Impl; 1432 1433 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1434 bool VectorizeOnlyWhenForced = false) 1435 : FunctionPass(ID) { 1436 Impl.InterleaveOnlyWhenForced = InterleaveOnlyWhenForced; 1437 Impl.VectorizeOnlyWhenForced = VectorizeOnlyWhenForced; 1438 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1439 } 1440 1441 bool runOnFunction(Function &F) override { 1442 if (skipFunction(F)) 1443 return false; 1444 1445 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1446 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1447 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1448 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1449 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1450 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1451 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 1452 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1453 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1454 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1455 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1456 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1457 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1458 1459 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1460 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1461 1462 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1463 GetLAA, *ORE, PSI); 1464 } 1465 1466 void getAnalysisUsage(AnalysisUsage &AU) const override { 1467 AU.addRequired<AssumptionCacheTracker>(); 1468 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1469 AU.addRequired<DominatorTreeWrapperPass>(); 1470 AU.addRequired<LoopInfoWrapperPass>(); 1471 AU.addRequired<ScalarEvolutionWrapperPass>(); 1472 AU.addRequired<TargetTransformInfoWrapperPass>(); 1473 AU.addRequired<AAResultsWrapperPass>(); 1474 AU.addRequired<LoopAccessLegacyAnalysis>(); 1475 AU.addRequired<DemandedBitsWrapperPass>(); 1476 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1477 1478 // We currently do not preserve loopinfo/dominator analyses with outer loop 1479 // vectorization. Until this is addressed, mark these analyses as preserved 1480 // only for non-VPlan-native path. 1481 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 1482 if (!EnableVPlanNativePath) { 1483 AU.addPreserved<LoopInfoWrapperPass>(); 1484 AU.addPreserved<DominatorTreeWrapperPass>(); 1485 } 1486 1487 AU.addPreserved<BasicAAWrapperPass>(); 1488 AU.addPreserved<GlobalsAAWrapperPass>(); 1489 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1490 } 1491 }; 1492 1493 } // end anonymous namespace 1494 1495 //===----------------------------------------------------------------------===// 1496 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1497 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1498 //===----------------------------------------------------------------------===// 1499 1500 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1501 // We need to place the broadcast of invariant variables outside the loop, 1502 // but only if it's proven safe to do so. Else, broadcast will be inside 1503 // vector loop body. 1504 Instruction *Instr = dyn_cast<Instruction>(V); 1505 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1506 (!Instr || 1507 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1508 // Place the code for broadcasting invariant variables in the new preheader. 1509 IRBuilder<>::InsertPointGuard Guard(Builder); 1510 if (SafeToHoist) 1511 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1512 1513 // Broadcast the scalar into all locations in the vector. 
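// For example, with VF = 4 a scalar %v becomes the vector <%v, %v, %v, %v>
// (an insertelement into lane zero followed by a zero-mask shufflevector).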
1514 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1515 1516 return Shuf; 1517 } 1518 1519 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1520 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1521 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1522 "Expected either an induction phi-node or a truncate of it!"); 1523 Value *Start = II.getStartValue(); 1524 1525 // Construct the initial value of the vector IV in the vector loop preheader 1526 auto CurrIP = Builder.saveIP(); 1527 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1528 if (isa<TruncInst>(EntryVal)) { 1529 assert(Start->getType()->isIntegerTy() && 1530 "Truncation requires an integer type"); 1531 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1532 Step = Builder.CreateTrunc(Step, TruncType); 1533 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1534 } 1535 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1536 Value *SteppedStart = 1537 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1538 1539 // We create vector phi nodes for both integer and floating-point induction 1540 // variables. Here, we determine the kind of arithmetic we will perform. 1541 Instruction::BinaryOps AddOp; 1542 Instruction::BinaryOps MulOp; 1543 if (Step->getType()->isIntegerTy()) { 1544 AddOp = Instruction::Add; 1545 MulOp = Instruction::Mul; 1546 } else { 1547 AddOp = II.getInductionOpcode(); 1548 MulOp = Instruction::FMul; 1549 } 1550 1551 // Multiply the vectorization factor by the step using integer or 1552 // floating-point arithmetic as appropriate. 1553 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1554 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1555 1556 // Create a vector splat to use in the induction update. 1557 // 1558 // FIXME: If the step is non-constant, we create the vector splat with 1559 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1560 // handle a constant vector splat. 1561 Value *SplatVF = isa<Constant>(Mul) 1562 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 1563 : Builder.CreateVectorSplat(VF, Mul); 1564 Builder.restoreIP(CurrIP); 1565 1566 // We may need to add the step a number of times, depending on the unroll 1567 // factor. The last of those goes into the PHI. 1568 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1569 &*LoopVectorBody->getFirstInsertionPt()); 1570 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1571 Instruction *LastInduction = VecInd; 1572 for (unsigned Part = 0; Part < UF; ++Part) { 1573 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1574 1575 if (isa<TruncInst>(EntryVal)) 1576 addMetadata(LastInduction, EntryVal); 1577 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1578 1579 LastInduction = cast<Instruction>(addFastMathFlag( 1580 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1581 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1582 } 1583 1584 // Move the last step to the end of the latch block. This ensures consistent 1585 // placement of all induction updates. 
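// For example, with UF = 2 the loop above defines %vec.ind plus two
// "step.add" updates; the last update is renamed %vec.ind.next below and
// feeds the phi back from the latch.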
1586 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1587 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1588 auto *ICmp = cast<Instruction>(Br->getCondition()); 1589 LastInduction->moveBefore(ICmp); 1590 LastInduction->setName("vec.ind.next"); 1591 1592 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1593 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1594 } 1595 1596 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1597 return Cost->isScalarAfterVectorization(I, VF) || 1598 Cost->isProfitableToScalarize(I, VF); 1599 } 1600 1601 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1602 if (shouldScalarizeInstruction(IV)) 1603 return true; 1604 auto isScalarInst = [&](User *U) -> bool { 1605 auto *I = cast<Instruction>(U); 1606 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1607 }; 1608 return llvm::any_of(IV->users(), isScalarInst); 1609 } 1610 1611 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1612 const InductionDescriptor &ID, const Instruction *EntryVal, 1613 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1614 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1615 "Expected either an induction phi-node or a truncate of it!"); 1616 1617 // This induction variable is not the phi from the original loop but the 1618 // newly-created IV based on the proof that casted Phi is equal to the 1619 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1620 // re-uses the same InductionDescriptor that original IV uses but we don't 1621 // have to do any recording in this case - that is done when original IV is 1622 // processed. 1623 if (isa<TruncInst>(EntryVal)) 1624 return; 1625 1626 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1627 if (Casts.empty()) 1628 return; 1629 // Only the first Cast instruction in the Casts vector is of interest. 1630 // The rest of the Casts (if exist) have no uses outside the 1631 // induction update chain itself. 1632 Instruction *CastInst = *Casts.begin(); 1633 if (Lane < UINT_MAX) 1634 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1635 else 1636 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1637 } 1638 1639 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1640 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1641 "Primary induction variable must have an integer type"); 1642 1643 auto II = Legal->getInductionVars()->find(IV); 1644 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 1645 1646 auto ID = II->second; 1647 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1648 1649 // The scalar value to broadcast. This will be derived from the canonical 1650 // induction variable. 1651 Value *ScalarIV = nullptr; 1652 1653 // The value from the original loop to which we are mapping the new induction 1654 // variable. 1655 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1656 1657 // True if we have vectorized the induction variable. 1658 auto VectorizedIV = false; 1659 1660 // Determine if we want a scalar version of the induction variable. This is 1661 // true if the induction variable itself is not widened, or if it has at 1662 // least one user in the loop that is not widened. 1663 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 1664 1665 // Generate code for the induction step. 
Note that induction steps are 1666 // required to be loop-invariant 1667 assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) && 1668 "Induction step should be loop invariant"); 1669 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1670 Value *Step = nullptr; 1671 if (PSE.getSE()->isSCEVable(IV->getType())) { 1672 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1673 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 1674 LoopVectorPreHeader->getTerminator()); 1675 } else { 1676 Step = cast<SCEVUnknown>(ID.getStep())->getValue(); 1677 } 1678 1679 // Try to create a new independent vector induction variable. If we can't 1680 // create the phi node, we will splat the scalar induction variable in each 1681 // loop iteration. 1682 if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) { 1683 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1684 VectorizedIV = true; 1685 } 1686 1687 // If we haven't yet vectorized the induction variable, or if we will create 1688 // a scalar one, we need to define the scalar induction variable and step 1689 // values. If we were given a truncation type, truncate the canonical 1690 // induction variable and step. Otherwise, derive these values from the 1691 // induction descriptor. 1692 if (!VectorizedIV || NeedsScalarIV) { 1693 ScalarIV = Induction; 1694 if (IV != OldInduction) { 1695 ScalarIV = IV->getType()->isIntegerTy() 1696 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1697 : Builder.CreateCast(Instruction::SIToFP, Induction, 1698 IV->getType()); 1699 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1700 ScalarIV->setName("offset.idx"); 1701 } 1702 if (Trunc) { 1703 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1704 assert(Step->getType()->isIntegerTy() && 1705 "Truncation requires an integer step"); 1706 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1707 Step = Builder.CreateTrunc(Step, TruncType); 1708 } 1709 } 1710 1711 // If we haven't yet vectorized the induction variable, splat the scalar 1712 // induction variable, and build the necessary step vectors. 1713 // TODO: Don't do it unless the vectorized IV is really required. 1714 if (!VectorizedIV) { 1715 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1716 for (unsigned Part = 0; Part < UF; ++Part) { 1717 Value *EntryPart = 1718 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1719 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1720 if (Trunc) 1721 addMetadata(EntryPart, Trunc); 1722 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1723 } 1724 } 1725 1726 // If an induction variable is only used for counting loop iterations or 1727 // calculating addresses, it doesn't need to be widened. Create scalar steps 1728 // that can be used by instructions we will later scalarize. Note that the 1729 // addition of the scalar steps will not increase the number of instructions 1730 // in the loop in the common case prior to InstCombine. We will be trading 1731 // one vector extract for each scalar step. 1732 if (NeedsScalarIV) 1733 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1734 } 1735 1736 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1737 Instruction::BinaryOps BinOp) { 1738 // Create and check the types. 
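// For example, for an integer induction with VLen = 4, StartIdx = 0 and step
// %s, the result is Val + <0, 1, 2, 3> * %s.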
1739 assert(Val->getType()->isVectorTy() && "Must be a vector"); 1740 int VLen = Val->getType()->getVectorNumElements(); 1741 1742 Type *STy = Val->getType()->getScalarType(); 1743 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1744 "Induction Step must be an integer or FP"); 1745 assert(Step->getType() == STy && "Step has wrong type"); 1746 1747 SmallVector<Constant *, 8> Indices; 1748 1749 if (STy->isIntegerTy()) { 1750 // Create a vector of consecutive numbers from zero to VF. 1751 for (int i = 0; i < VLen; ++i) 1752 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1753 1754 // Add the consecutive indices to the vector value. 1755 Constant *Cv = ConstantVector::get(Indices); 1756 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1757 Step = Builder.CreateVectorSplat(VLen, Step); 1758 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1759 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1760 // which can be found from the original scalar operations. 1761 Step = Builder.CreateMul(Cv, Step); 1762 return Builder.CreateAdd(Val, Step, "induction"); 1763 } 1764 1765 // Floating point induction. 1766 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1767 "Binary Opcode should be specified for FP induction"); 1768 // Create a vector of consecutive numbers from zero to VF. 1769 for (int i = 0; i < VLen; ++i) 1770 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1771 1772 // Add the consecutive indices to the vector value. 1773 Constant *Cv = ConstantVector::get(Indices); 1774 1775 Step = Builder.CreateVectorSplat(VLen, Step); 1776 1777 // Floating point operations had to be 'fast' to enable the induction. 1778 FastMathFlags Flags; 1779 Flags.setFast(); 1780 1781 Value *MulOp = Builder.CreateFMul(Cv, Step); 1782 if (isa<Instruction>(MulOp)) 1783 // Have to check, MulOp may be a constant 1784 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1785 1786 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1787 if (isa<Instruction>(BOp)) 1788 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1789 return BOp; 1790 } 1791 1792 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1793 Instruction *EntryVal, 1794 const InductionDescriptor &ID) { 1795 // We shouldn't have to build scalar steps if we aren't vectorizing. 1796 assert(VF > 1 && "VF should be greater than one"); 1797 1798 // Get the value type and ensure it and the step have the same integer type. 1799 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1800 assert(ScalarIVTy == Step->getType() && 1801 "Val and Step should have the same type"); 1802 1803 // We build scalar steps for both integer and floating-point induction 1804 // variables. Here, we determine the kind of arithmetic we will perform. 1805 Instruction::BinaryOps AddOp; 1806 Instruction::BinaryOps MulOp; 1807 if (ScalarIVTy->isIntegerTy()) { 1808 AddOp = Instruction::Add; 1809 MulOp = Instruction::Mul; 1810 } else { 1811 AddOp = ID.getInductionOpcode(); 1812 MulOp = Instruction::FMul; 1813 } 1814 1815 // Determine the number of scalars we need to generate for each unroll 1816 // iteration. If EntryVal is uniform, we only need to generate the first 1817 // lane. Otherwise, we generate all VF values. 1818 unsigned Lanes = 1819 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 1820 : VF; 1821 // Compute the scalar steps and save the results in VectorLoopValueMap. 
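// For example, with UF = 2, VF = 4 and a non-uniform EntryVal, this emits the
// eight scalar values ScalarIV + (0 .. 7) * Step.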
1822 for (unsigned Part = 0; Part < UF; ++Part) { 1823 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 1824 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 1825 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 1826 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 1827 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 1828 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 1829 } 1830 } 1831 } 1832 1833 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 1834 assert(V != Induction && "The new induction variable should not be used."); 1835 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 1836 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1837 1838 // If we have a stride that is replaced by one, do it here. Defer this for 1839 // the VPlan-native path until we start running Legal checks in that path. 1840 if (!EnableVPlanNativePath && Legal->hasStride(V)) 1841 V = ConstantInt::get(V->getType(), 1); 1842 1843 // If we have a vector mapped to this value, return it. 1844 if (VectorLoopValueMap.hasVectorValue(V, Part)) 1845 return VectorLoopValueMap.getVectorValue(V, Part); 1846 1847 // If the value has not been vectorized, check if it has been scalarized 1848 // instead. If it has been scalarized, and we actually need the value in 1849 // vector form, we will construct the vector values on demand. 1850 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 1851 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 1852 1853 // If we've scalarized a value, that value should be an instruction. 1854 auto *I = cast<Instruction>(V); 1855 1856 // If we aren't vectorizing, we can just copy the scalar map values over to 1857 // the vector map. 1858 if (VF == 1) { 1859 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 1860 return ScalarValue; 1861 } 1862 1863 // Get the last scalar instruction we generated for V and Part. If the value 1864 // is known to be uniform after vectorization, this corresponds to lane zero 1865 // of the Part unroll iteration. Otherwise, the last instruction is the one 1866 // we created for the last vector lane of the Part unroll iteration. 1867 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 1868 auto *LastInst = cast<Instruction>( 1869 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 1870 1871 // Set the insert point after the last scalarized instruction. This ensures 1872 // the insertelement sequence will directly follow the scalar definitions. 1873 auto OldIP = Builder.saveIP(); 1874 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 1875 Builder.SetInsertPoint(&*NewIP); 1876 1877 // However, if we are vectorizing, we need to construct the vector values. 1878 // If the value is known to be uniform after vectorization, we can just 1879 // broadcast the scalar value corresponding to lane zero for each unroll 1880 // iteration. Otherwise, we construct the vector values using insertelement 1881 // instructions. Since the resulting vectors are stored in 1882 // VectorLoopValueMap, we will only generate the insertelements once. 1883 Value *VectorValue = nullptr; 1884 if (Cost->isUniformAfterVectorization(I, VF)) { 1885 VectorValue = getBroadcastInstrs(ScalarValue); 1886 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 1887 } else { 1888 // Initialize packing with insertelements to start from undef. 
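// For example, with VF = 4 this seeds an undef <4 x Ty> value and then fills
// lanes 0..3 with the previously generated scalars for this part.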
1889 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 1890 VectorLoopValueMap.setVectorValue(V, Part, Undef); 1891 for (unsigned Lane = 0; Lane < VF; ++Lane) 1892 packScalarIntoVectorValue(V, {Part, Lane}); 1893 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 1894 } 1895 Builder.restoreIP(OldIP); 1896 return VectorValue; 1897 } 1898 1899 // If this scalar is unknown, assume that it is a constant or that it is 1900 // loop invariant. Broadcast V and save the value for future uses. 1901 Value *B = getBroadcastInstrs(V); 1902 VectorLoopValueMap.setVectorValue(V, Part, B); 1903 return B; 1904 } 1905 1906 Value * 1907 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 1908 const VPIteration &Instance) { 1909 // If the value is not an instruction contained in the loop, it should 1910 // already be scalar. 1911 if (OrigLoop->isLoopInvariant(V)) 1912 return V; 1913 1914 assert(Instance.Lane > 0 1915 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 1916 : true && "Uniform values only have lane zero"); 1917 1918 // If the value from the original loop has not been vectorized, it is 1919 // represented by UF x VF scalar values in the new loop. Return the requested 1920 // scalar value. 1921 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 1922 return VectorLoopValueMap.getScalarValue(V, Instance); 1923 1924 // If the value has not been scalarized, get its entry in VectorLoopValueMap 1925 // for the given unroll part. If this entry is not a vector type (i.e., the 1926 // vectorization factor is one), there is no need to generate an 1927 // extractelement instruction. 1928 auto *U = getOrCreateVectorValue(V, Instance.Part); 1929 if (!U->getType()->isVectorTy()) { 1930 assert(VF == 1 && "Value not scalarized has non-vector type"); 1931 return U; 1932 } 1933 1934 // Otherwise, the value from the original loop has been vectorized and is 1935 // represented by UF vector values. Extract and return the requested scalar 1936 // value from the appropriate vector lane. 1937 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 1938 } 1939 1940 void InnerLoopVectorizer::packScalarIntoVectorValue( 1941 Value *V, const VPIteration &Instance) { 1942 assert(V != Induction && "The new induction variable should not be used."); 1943 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 1944 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 1945 1946 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 1947 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 1948 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 1949 Builder.getInt32(Instance.Lane)); 1950 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 1951 } 1952 1953 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 1954 assert(Vec->getType()->isVectorTy() && "Invalid type"); 1955 SmallVector<Constant *, 8> ShuffleMask; 1956 for (unsigned i = 0; i < VF; ++i) 1957 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 1958 1959 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 1960 ConstantVector::get(ShuffleMask), 1961 "reverse"); 1962 } 1963 1964 // Return whether we allow using masked interleave-groups (for dealing with 1965 // strided loads/stores that reside in predicated blocks, or for dealing 1966 // with gaps). 
1967 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 1968 // If an override option has been passed in for interleaved accesses, use it. 1969 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 1970 return EnableMaskedInterleavedMemAccesses; 1971 1972 return TTI.enableMaskedInterleavedAccessVectorization(); 1973 } 1974 1975 // Try to vectorize the interleave group that \p Instr belongs to. 1976 // 1977 // E.g. Translate following interleaved load group (factor = 3): 1978 // for (i = 0; i < N; i+=3) { 1979 // R = Pic[i]; // Member of index 0 1980 // G = Pic[i+1]; // Member of index 1 1981 // B = Pic[i+2]; // Member of index 2 1982 // ... // do something to R, G, B 1983 // } 1984 // To: 1985 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 1986 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 1987 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 1988 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 1989 // 1990 // Or translate following interleaved store group (factor = 3): 1991 // for (i = 0; i < N; i+=3) { 1992 // ... do something to R, G, B 1993 // Pic[i] = R; // Member of index 0 1994 // Pic[i+1] = G; // Member of index 1 1995 // Pic[i+2] = B; // Member of index 2 1996 // } 1997 // To: 1998 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 1999 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2000 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2001 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2002 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2003 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr, 2004 VectorParts *BlockInMask) { 2005 const InterleaveGroup<Instruction> *Group = 2006 Cost->getInterleavedAccessGroup(Instr); 2007 assert(Group && "Fail to get an interleaved access group."); 2008 2009 // Skip if current instruction is not the insert position. 2010 if (Instr != Group->getInsertPos()) 2011 return; 2012 2013 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2014 Value *Ptr = getLoadStorePointerOperand(Instr); 2015 2016 // Prepare for the vector type of the interleaved load/store. 2017 Type *ScalarTy = getMemInstValueType(Instr); 2018 unsigned InterleaveFactor = Group->getFactor(); 2019 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2020 Type *PtrTy = VecTy->getPointerTo(getLoadStoreAddressSpace(Instr)); 2021 2022 // Prepare for the new pointers. 2023 setDebugLocFromInst(Builder, Ptr); 2024 SmallVector<Value *, 2> NewPtrs; 2025 unsigned Index = Group->getIndex(Instr); 2026 2027 VectorParts Mask; 2028 bool IsMaskForCondRequired = BlockInMask; 2029 if (IsMaskForCondRequired) { 2030 Mask = *BlockInMask; 2031 // TODO: extend the masked interleaved-group support to reversed access. 2032 assert(!Group->isReverse() && "Reversed masked interleave-group " 2033 "not supported."); 2034 } 2035 2036 // If the group is reverse, adjust the index to refer to the last vector lane 2037 // instead of the first. We adjust the index from the first vector lane, 2038 // rather than directly getting the pointer for lane VF - 1, because the 2039 // pointer operand of the interleaved access is supposed to be uniform. For 2040 // uniform instructions, we're only required to generate a value for the 2041 // first vector lane in each unroll iteration. 
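// For example, with VF = 4 and an interleave factor of 3, the index is
// advanced by (4 - 1) * 3 = 9 elements for a reversed group.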
2042 if (Group->isReverse()) 2043 Index += (VF - 1) * Group->getFactor(); 2044 2045 bool InBounds = false; 2046 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2047 InBounds = gep->isInBounds(); 2048 2049 for (unsigned Part = 0; Part < UF; Part++) { 2050 Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0}); 2051 2052 // Notice current instruction could be any index. Need to adjust the address 2053 // to the member of index 0. 2054 // 2055 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2056 // b = A[i]; // Member of index 0 2057 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2058 // 2059 // E.g. A[i+1] = a; // Member of index 1 2060 // A[i] = b; // Member of index 0 2061 // A[i+2] = c; // Member of index 2 (Current instruction) 2062 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2063 NewPtr = Builder.CreateGEP(ScalarTy, NewPtr, Builder.getInt32(-Index)); 2064 if (InBounds) 2065 cast<GetElementPtrInst>(NewPtr)->setIsInBounds(true); 2066 2067 // Cast to the vector pointer type. 2068 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2069 } 2070 2071 setDebugLocFromInst(Builder, Instr); 2072 Value *UndefVec = UndefValue::get(VecTy); 2073 2074 Value *MaskForGaps = nullptr; 2075 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2076 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2077 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2078 } 2079 2080 // Vectorize the interleaved load group. 2081 if (isa<LoadInst>(Instr)) { 2082 // For each unroll part, create a wide load for the group. 2083 SmallVector<Value *, 2> NewLoads; 2084 for (unsigned Part = 0; Part < UF; Part++) { 2085 Instruction *NewLoad; 2086 if (IsMaskForCondRequired || MaskForGaps) { 2087 assert(useMaskedInterleavedAccesses(*TTI) && 2088 "masked interleaved groups are not allowed."); 2089 Value *GroupMask = MaskForGaps; 2090 if (IsMaskForCondRequired) { 2091 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2092 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2093 Value *ShuffledMask = Builder.CreateShuffleVector( 2094 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2095 GroupMask = MaskForGaps 2096 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2097 MaskForGaps) 2098 : ShuffledMask; 2099 } 2100 NewLoad = 2101 Builder.CreateMaskedLoad(NewPtrs[Part], Group->getAlignment(), 2102 GroupMask, UndefVec, "wide.masked.vec"); 2103 } 2104 else 2105 NewLoad = Builder.CreateAlignedLoad(VecTy, NewPtrs[Part], 2106 Group->getAlignment(), "wide.vec"); 2107 Group->addMetadata(NewLoad); 2108 NewLoads.push_back(NewLoad); 2109 } 2110 2111 // For each member in the group, shuffle out the appropriate data from the 2112 // wide loads. 2113 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2114 Instruction *Member = Group->getMember(I); 2115 2116 // Skip the gaps in the group. 2117 if (!Member) 2118 continue; 2119 2120 Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF); 2121 for (unsigned Part = 0; Part < UF; Part++) { 2122 Value *StridedVec = Builder.CreateShuffleVector( 2123 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2124 2125 // If this member has different type, cast the result type. 
2126 if (Member->getType() != ScalarTy) { 2127 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2128 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2129 } 2130 2131 if (Group->isReverse()) 2132 StridedVec = reverseVector(StridedVec); 2133 2134 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2135 } 2136 } 2137 return; 2138 } 2139 2140 // The sub vector type for current instruction. 2141 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2142 2143 // Vectorize the interleaved store group. 2144 for (unsigned Part = 0; Part < UF; Part++) { 2145 // Collect the stored vector from each member. 2146 SmallVector<Value *, 4> StoredVecs; 2147 for (unsigned i = 0; i < InterleaveFactor; i++) { 2148 // Interleaved store group doesn't allow a gap, so each index has a member 2149 Instruction *Member = Group->getMember(i); 2150 assert(Member && "Fail to get a member from an interleaved store group"); 2151 2152 Value *StoredVec = getOrCreateVectorValue( 2153 cast<StoreInst>(Member)->getValueOperand(), Part); 2154 if (Group->isReverse()) 2155 StoredVec = reverseVector(StoredVec); 2156 2157 // If this member has different type, cast it to a unified type. 2158 2159 if (StoredVec->getType() != SubVT) 2160 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2161 2162 StoredVecs.push_back(StoredVec); 2163 } 2164 2165 // Concatenate all vectors into a wide vector. 2166 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2167 2168 // Interleave the elements in the wide vector. 2169 Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor); 2170 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2171 "interleaved.vec"); 2172 2173 Instruction *NewStoreInstr; 2174 if (IsMaskForCondRequired) { 2175 auto *Undefs = UndefValue::get(Mask[Part]->getType()); 2176 auto *RepMask = createReplicatedMask(Builder, InterleaveFactor, VF); 2177 Value *ShuffledMask = Builder.CreateShuffleVector( 2178 Mask[Part], Undefs, RepMask, "interleaved.mask"); 2179 NewStoreInstr = Builder.CreateMaskedStore( 2180 IVec, NewPtrs[Part], Group->getAlignment(), ShuffledMask); 2181 } 2182 else 2183 NewStoreInstr = Builder.CreateAlignedStore(IVec, NewPtrs[Part], 2184 Group->getAlignment()); 2185 2186 Group->addMetadata(NewStoreInstr); 2187 } 2188 } 2189 2190 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2191 VectorParts *BlockInMask) { 2192 // Attempt to issue a wide load. 2193 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2194 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2195 2196 assert((LI || SI) && "Invalid Load/Store instruction"); 2197 2198 LoopVectorizationCostModel::InstWidening Decision = 2199 Cost->getWideningDecision(Instr, VF); 2200 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 2201 "CM decision should be taken at this point"); 2202 if (Decision == LoopVectorizationCostModel::CM_Interleave) 2203 return vectorizeInterleaveGroup(Instr); 2204 2205 Type *ScalarDataTy = getMemInstValueType(Instr); 2206 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2207 Value *Ptr = getLoadStorePointerOperand(Instr); 2208 unsigned Alignment = getLoadStoreAlignment(Instr); 2209 // An alignment of 0 means target abi alignment. We need to use the scalar's 2210 // target abi alignment in such a case. 
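// For example, an i32 access with alignment 0 picks up the DataLayout's ABI
// alignment for i32 (typically 4 bytes).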
2211 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2212 if (!Alignment) 2213 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2214 unsigned AddressSpace = getLoadStoreAddressSpace(Instr); 2215 2216 // Determine if the pointer operand of the access is either consecutive or 2217 // reverse consecutive. 2218 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2219 bool ConsecutiveStride = 2220 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2221 bool CreateGatherScatter = 2222 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2223 2224 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2225 // gather/scatter. Otherwise Decision should have been to Scalarize. 2226 assert((ConsecutiveStride || CreateGatherScatter) && 2227 "The instruction should be scalarized"); 2228 2229 // Handle consecutive loads/stores. 2230 if (ConsecutiveStride) 2231 Ptr = getOrCreateScalarValue(Ptr, {0, 0}); 2232 2233 VectorParts Mask; 2234 bool isMaskRequired = BlockInMask; 2235 if (isMaskRequired) 2236 Mask = *BlockInMask; 2237 2238 bool InBounds = false; 2239 if (auto *gep = dyn_cast<GetElementPtrInst>( 2240 getLoadStorePointerOperand(Instr)->stripPointerCasts())) 2241 InBounds = gep->isInBounds(); 2242 2243 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2244 // Calculate the pointer for the specific unroll-part. 2245 GetElementPtrInst *PartPtr = nullptr; 2246 2247 if (Reverse) { 2248 // If the address is consecutive but reversed, then the 2249 // wide store needs to start at the last vector element. 2250 PartPtr = cast<GetElementPtrInst>( 2251 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2252 PartPtr->setIsInBounds(InBounds); 2253 PartPtr = cast<GetElementPtrInst>( 2254 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2255 PartPtr->setIsInBounds(InBounds); 2256 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2257 Mask[Part] = reverseVector(Mask[Part]); 2258 } else { 2259 PartPtr = cast<GetElementPtrInst>( 2260 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2261 PartPtr->setIsInBounds(InBounds); 2262 } 2263 2264 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2265 }; 2266 2267 // Handle Stores: 2268 if (SI) { 2269 setDebugLocFromInst(Builder, SI); 2270 2271 for (unsigned Part = 0; Part < UF; ++Part) { 2272 Instruction *NewSI = nullptr; 2273 Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part); 2274 if (CreateGatherScatter) { 2275 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2276 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2277 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2278 MaskPart); 2279 } else { 2280 if (Reverse) { 2281 // If we store to reverse consecutive memory locations, then we need 2282 // to reverse the order of elements in the stored value. 2283 StoredVal = reverseVector(StoredVal); 2284 // We don't want to update the value in the map as it might be used in 2285 // another expression. So don't call resetVectorValue(StoredVal). 2286 } 2287 auto *VecPtr = CreateVecPtr(Part, Ptr); 2288 if (isMaskRequired) 2289 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2290 Mask[Part]); 2291 else 2292 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2293 } 2294 addMetadata(NewSI, SI); 2295 } 2296 return; 2297 } 2298 2299 // Handle loads. 
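// Each unroll part gets a single wide load: a masked gather, a masked load,
// or a plain aligned load, with the result reversed afterwards if needed.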
2300 assert(LI && "Must have a load instruction"); 2301 setDebugLocFromInst(Builder, LI); 2302 for (unsigned Part = 0; Part < UF; ++Part) { 2303 Value *NewLI; 2304 if (CreateGatherScatter) { 2305 Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr; 2306 Value *VectorGep = getOrCreateVectorValue(Ptr, Part); 2307 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2308 nullptr, "wide.masked.gather"); 2309 addMetadata(NewLI, LI); 2310 } else { 2311 auto *VecPtr = CreateVecPtr(Part, Ptr); 2312 if (isMaskRequired) 2313 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2314 UndefValue::get(DataTy), 2315 "wide.masked.load"); 2316 else 2317 NewLI = 2318 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2319 2320 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2321 addMetadata(NewLI, LI); 2322 if (Reverse) 2323 NewLI = reverseVector(NewLI); 2324 } 2325 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2326 } 2327 } 2328 2329 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2330 const VPIteration &Instance, 2331 bool IfPredicateInstr) { 2332 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2333 2334 setDebugLocFromInst(Builder, Instr); 2335 2336 // Does this instruction return a value ? 2337 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2338 2339 Instruction *Cloned = Instr->clone(); 2340 if (!IsVoidRetTy) 2341 Cloned->setName(Instr->getName() + ".cloned"); 2342 2343 // Replace the operands of the cloned instructions with their scalar 2344 // equivalents in the new loop. 2345 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2346 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2347 Cloned->setOperand(op, NewOp); 2348 } 2349 addNewMetadata(Cloned, Instr); 2350 2351 // Place the cloned scalar in the new loop. 2352 Builder.Insert(Cloned); 2353 2354 // Add the cloned scalar to the scalar map entry. 2355 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2356 2357 // If we just cloned a new assumption, add it the assumption cache. 2358 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2359 if (II->getIntrinsicID() == Intrinsic::assume) 2360 AC->registerAssumption(II); 2361 2362 // End if-block. 2363 if (IfPredicateInstr) 2364 PredicatedInstructions.push_back(Cloned); 2365 } 2366 2367 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2368 Value *End, Value *Step, 2369 Instruction *DL) { 2370 BasicBlock *Header = L->getHeader(); 2371 BasicBlock *Latch = L->getLoopLatch(); 2372 // As we're just creating this loop, it's possible no latch exists 2373 // yet. If so, use the header as this will be a single block loop. 2374 if (!Latch) 2375 Latch = Header; 2376 2377 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2378 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2379 setDebugLocFromInst(Builder, OldInst); 2380 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2381 2382 Builder.SetInsertPoint(Latch->getTerminator()); 2383 setDebugLocFromInst(Builder, OldInst); 2384 2385 // Create i+1 and fill the PHINode. 2386 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2387 Induction->addIncoming(Start, L->getLoopPreheader()); 2388 Induction->addIncoming(Next, Latch); 2389 // Create the compare. 2390 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2391 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2392 2393 // Now we have two terminators. 
Remove the old one from the block. 2394 Latch->getTerminator()->eraseFromParent(); 2395 2396 return Induction; 2397 } 2398 2399 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2400 if (TripCount) 2401 return TripCount; 2402 2403 assert(L && "Create Trip Count for null loop."); 2404 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2405 // Find the loop boundaries. 2406 ScalarEvolution *SE = PSE.getSE(); 2407 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2408 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2409 "Invalid loop count"); 2410 2411 Type *IdxTy = Legal->getWidestInductionType(); 2412 assert(IdxTy && "No type for induction"); 2413 2414 // The exit count might have the type of i64 while the phi is i32. This can 2415 // happen if we have an induction variable that is sign extended before the 2416 // compare. The only way that we get a backedge taken count is that the 2417 // induction variable was signed and as such will not overflow. In such a case 2418 // truncation is legal. 2419 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 2420 IdxTy->getPrimitiveSizeInBits()) 2421 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2422 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2423 2424 // Get the total trip count from the count by adding 1. 2425 const SCEV *ExitCount = SE->getAddExpr( 2426 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2427 2428 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2429 2430 // Expand the trip count and place the new instructions in the preheader. 2431 // Notice that the pre-header does not change, only the loop body. 2432 SCEVExpander Exp(*SE, DL, "induction"); 2433 2434 // Count holds the overall loop count (N). 2435 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2436 L->getLoopPreheader()->getTerminator()); 2437 2438 if (TripCount->getType()->isPointerTy()) 2439 TripCount = 2440 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2441 L->getLoopPreheader()->getTerminator()); 2442 2443 return TripCount; 2444 } 2445 2446 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2447 if (VectorTripCount) 2448 return VectorTripCount; 2449 2450 Value *TC = getOrCreateTripCount(L); 2451 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2452 2453 Type *Ty = TC->getType(); 2454 Constant *Step = ConstantInt::get(Ty, VF * UF); 2455 2456 // If the tail is to be folded by masking, round the number of iterations N 2457 // up to a multiple of Step instead of rounding down. This is done by first 2458 // adding Step-1 and then rounding down. Note that it's ok if this addition 2459 // overflows: the vector induction variable will eventually wrap to zero given 2460 // that it starts at zero and its Step is a power of two; the loop will then 2461 // exit, with the last early-exit vector comparison also producing all-true. 2462 if (Cost->foldTailByMasking()) { 2463 assert(isPowerOf2_32(VF * UF) && 2464 "VF*UF must be a power of 2 when folding tail by masking"); 2465 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2466 } 2467 2468 // Now we need to generate the expression for the part of the loop that the 2469 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2470 // iterations are not required for correctness, or N - Step, otherwise. 
Step 2471 // is equal to the vectorization factor (number of SIMD elements) times the 2472 // unroll factor (number of SIMD instructions). 2473 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2474 2475 // If there is a non-reversed interleaved group that may speculatively access 2476 // memory out-of-bounds, we need to ensure that there will be at least one 2477 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2478 // the trip count, we set the remainder to be equal to the step. If the step 2479 // does not evenly divide the trip count, no adjustment is necessary since 2480 // there will already be scalar iterations. Note that the minimum iterations 2481 // check ensures that N >= Step. 2482 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2483 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2484 R = Builder.CreateSelect(IsZero, Step, R); 2485 } 2486 2487 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2488 2489 return VectorTripCount; 2490 } 2491 2492 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2493 const DataLayout &DL) { 2494 // Verify that V is a vector type with same number of elements as DstVTy. 2495 unsigned VF = DstVTy->getNumElements(); 2496 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2497 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2498 Type *SrcElemTy = SrcVecTy->getElementType(); 2499 Type *DstElemTy = DstVTy->getElementType(); 2500 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2501 "Vector elements must have same size"); 2502 2503 // Do a direct cast if element types are castable. 2504 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2505 return Builder.CreateBitOrPointerCast(V, DstVTy); 2506 } 2507 // V cannot be directly casted to desired vector type. 2508 // May happen when V is a floating point vector but DstVTy is a vector of 2509 // pointers or vice-versa. Handle this using a two-step bitcast using an 2510 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2511 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2512 "Only one type should be a pointer type"); 2513 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2514 "Only one type should be a floating point type"); 2515 Type *IntTy = 2516 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2517 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2518 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2519 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2520 } 2521 2522 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2523 BasicBlock *Bypass) { 2524 Value *Count = getOrCreateTripCount(L); 2525 BasicBlock *BB = L->getLoopPreheader(); 2526 IRBuilder<> Builder(BB->getTerminator()); 2527 2528 // Generate code to check if the loop's trip count is less than VF * UF, or 2529 // equal to it in case a scalar epilogue is required; this implies that the 2530 // vector trip count is zero. This check also covers the case where adding one 2531 // to the backedge-taken count overflowed leading to an incorrect trip count 2532 // of zero. In this case we will also jump to the scalar loop. 2533 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2534 : ICmpInst::ICMP_ULT; 2535 2536 // If tail is to be folded, vector loop takes care of all iterations. 
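// For example, with VF = 4 and UF = 2 the check below is "Count < 8", or
// "Count <= 8" when a scalar epilogue is required.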
2537 Value *CheckMinIters = Builder.getFalse();
2538 if (!Cost->foldTailByMasking())
2539 CheckMinIters = Builder.CreateICmp(
2540 P, Count, ConstantInt::get(Count->getType(), VF * UF),
2541 "min.iters.check");
2542
2543 BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2544 // Update dominator tree immediately if the generated block is a
2545 // LoopBypassBlock because SCEV expansions to generate loop bypass
2546 // checks may query it before the current function is finished.
2547 DT->addNewBlock(NewBB, BB);
2548 if (L->getParentLoop())
2549 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2550 ReplaceInstWithInst(BB->getTerminator(),
2551 BranchInst::Create(Bypass, NewBB, CheckMinIters));
2552 LoopBypassBlocks.push_back(BB);
2553 }
2554
2555 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
2556 BasicBlock *BB = L->getLoopPreheader();
2557
2558 // Generate the code to check the SCEV assumptions that we made.
2559 // We want the new basic block to start at the first instruction in a
2560 // sequence of instructions that form a check.
2561 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
2562 "scev.check");
2563 Value *SCEVCheck =
2564 Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
2565
2566 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
2567 if (C->isZero())
2568 return;
2569
2570 assert(!Cost->foldTailByMasking() &&
2571 "Cannot SCEV check stride or overflow when folding tail");
2572 // Create a new block containing the stride check.
2573 BB->setName("vector.scevcheck");
2574 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2575 // Update dominator tree immediately if the generated block is a
2576 // LoopBypassBlock because SCEV expansions to generate loop bypass
2577 // checks may query it before the current function is finished.
2578 DT->addNewBlock(NewBB, BB);
2579 if (L->getParentLoop())
2580 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
2581 ReplaceInstWithInst(BB->getTerminator(),
2582 BranchInst::Create(Bypass, NewBB, SCEVCheck));
2583 LoopBypassBlocks.push_back(BB);
2584 AddedSafetyChecks = true;
2585 }
2586
2587 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
2588 // VPlan-native path does not do any analysis for runtime checks currently.
2589 if (EnableVPlanNativePath)
2590 return;
2591
2592 BasicBlock *BB = L->getLoopPreheader();
2593
2594 // Generate the code that checks at runtime whether arrays overlap. We put the
2595 // checks into a separate block to make the more common case of few elements
2596 // faster.
2597 Instruction *FirstCheckInst;
2598 Instruction *MemRuntimeCheck;
2599 std::tie(FirstCheckInst, MemRuntimeCheck) =
2600 Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
2601 if (!MemRuntimeCheck)
2602 return;
2603
2604 assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail");
2605 // Create a new block containing the memory check.
2606 BB->setName("vector.memcheck");
2607 auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
2608 // Update dominator tree immediately if the generated block is a
2609 // LoopBypassBlock because SCEV expansions to generate loop bypass
2610 // checks may query it before the current function is finished.
2611 DT->addNewBlock(NewBB, BB); 2612 if (L->getParentLoop()) 2613 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 2614 ReplaceInstWithInst(BB->getTerminator(), 2615 BranchInst::Create(Bypass, NewBB, MemRuntimeCheck)); 2616 LoopBypassBlocks.push_back(BB); 2617 AddedSafetyChecks = true; 2618 2619 // We currently don't use LoopVersioning for the actual loop cloning but we 2620 // still use it to add the noalias metadata. 2621 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2622 PSE.getSE()); 2623 LVer->prepareNoAliasMetadata(); 2624 } 2625 2626 Value *InnerLoopVectorizer::emitTransformedIndex( 2627 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2628 const InductionDescriptor &ID) const { 2629 2630 SCEVExpander Exp(*SE, DL, "induction"); 2631 auto Step = ID.getStep(); 2632 auto StartValue = ID.getStartValue(); 2633 assert(Index->getType() == Step->getType() && 2634 "Index type does not match StepValue type"); 2635 2636 // Note: the IR at this point is broken. We cannot use SE to create any new 2637 // SCEV and then expand it, hoping that SCEV's simplification will give us 2638 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2639 // lead to various SCEV crashes. So all we can do is to use builder and rely 2640 // on InstCombine for future simplifications. Here we handle some trivial 2641 // cases only. 2642 auto CreateAdd = [&B](Value *X, Value *Y) { 2643 assert(X->getType() == Y->getType() && "Types don't match!"); 2644 if (auto *CX = dyn_cast<ConstantInt>(X)) 2645 if (CX->isZero()) 2646 return Y; 2647 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2648 if (CY->isZero()) 2649 return X; 2650 return B.CreateAdd(X, Y); 2651 }; 2652 2653 auto CreateMul = [&B](Value *X, Value *Y) { 2654 assert(X->getType() == Y->getType() && "Types don't match!"); 2655 if (auto *CX = dyn_cast<ConstantInt>(X)) 2656 if (CX->isOne()) 2657 return Y; 2658 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2659 if (CY->isOne()) 2660 return X; 2661 return B.CreateMul(X, Y); 2662 }; 2663 2664 switch (ID.getKind()) { 2665 case InductionDescriptor::IK_IntInduction: { 2666 assert(Index->getType() == StartValue->getType() && 2667 "Index type does not match StartValue type"); 2668 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2669 return B.CreateSub(StartValue, Index); 2670 auto *Offset = CreateMul( 2671 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2672 return CreateAdd(StartValue, Offset); 2673 } 2674 case InductionDescriptor::IK_PtrInduction: { 2675 assert(isa<SCEVConstant>(Step) && 2676 "Expected constant step for pointer induction"); 2677 return B.CreateGEP( 2678 StartValue->getType()->getPointerElementType(), StartValue, 2679 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2680 &*B.GetInsertPoint()))); 2681 } 2682 case InductionDescriptor::IK_FpInduction: { 2683 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2684 auto InductionBinOp = ID.getInductionBinOp(); 2685 assert(InductionBinOp && 2686 (InductionBinOp->getOpcode() == Instruction::FAdd || 2687 InductionBinOp->getOpcode() == Instruction::FSub) && 2688 "Original bin op should be defined for FP induction"); 2689 2690 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2691 2692 // Floating point operations had to be 'fast' to enable the induction. 
2693 FastMathFlags Flags; 2694 Flags.setFast(); 2695 2696 Value *MulExp = B.CreateFMul(StepValue, Index); 2697 if (isa<Instruction>(MulExp)) 2698 // We have to check, the MulExp may be a constant. 2699 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2700 2701 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2702 "induction"); 2703 if (isa<Instruction>(BOp)) 2704 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2705 2706 return BOp; 2707 } 2708 case InductionDescriptor::IK_NoInduction: 2709 return nullptr; 2710 } 2711 llvm_unreachable("invalid enum"); 2712 } 2713 2714 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2715 /* 2716 In this function we generate a new loop. The new loop will contain 2717 the vectorized instructions while the old loop will continue to run the 2718 scalar remainder. 2719 2720 [ ] <-- loop iteration number check. 2721 / | 2722 / v 2723 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2724 | / | 2725 | / v 2726 || [ ] <-- vector pre header. 2727 |/ | 2728 | v 2729 | [ ] \ 2730 | [ ]_| <-- vector loop. 2731 | | 2732 | v 2733 | -[ ] <--- middle-block. 2734 | / | 2735 | / v 2736 -|- >[ ] <--- new preheader. 2737 | | 2738 | v 2739 | [ ] \ 2740 | [ ]_| <-- old scalar loop to handle remainder. 2741 \ | 2742 \ v 2743 >[ ] <-- exit block. 2744 ... 2745 */ 2746 2747 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 2748 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 2749 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 2750 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2751 assert(VectorPH && "Invalid loop structure"); 2752 assert(ExitBlock && "Must have an exit block"); 2753 2754 // Some loops have a single integer induction variable, while other loops 2755 // don't. One example is c++ iterators that often have multiple pointer 2756 // induction variables. In the code below we also support a case where we 2757 // don't have a single induction variable. 2758 // 2759 // We try to obtain an induction variable from the original loop as hard 2760 // as possible. However if we don't find one that: 2761 // - is an integer 2762 // - counts from zero, stepping by one 2763 // - is the size of the widest induction variable type 2764 // then we create a new one. 2765 OldInduction = Legal->getPrimaryInduction(); 2766 Type *IdxTy = Legal->getWidestInductionType(); 2767 2768 // Split the single block loop into the two loop structure described above. 2769 BasicBlock *VecBody = 2770 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 2771 BasicBlock *MiddleBlock = 2772 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 2773 BasicBlock *ScalarPH = 2774 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 2775 2776 // Create and register the new vector loop. 2777 Loop *Lp = LI->AllocateLoop(); 2778 Loop *ParentLoop = OrigLoop->getParentLoop(); 2779 2780 // Insert the new loop into the loop nest and register the new basic blocks 2781 // before calling any utilities such as SCEV that require valid LoopInfo. 2782 if (ParentLoop) { 2783 ParentLoop->addChildLoop(Lp); 2784 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 2785 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 2786 } else { 2787 LI->addTopLevelLoop(Lp); 2788 } 2789 Lp->addBasicBlockToLoop(VecBody, *LI); 2790 2791 // Find the loop boundaries. 2792 Value *Count = getOrCreateTripCount(Lp); 2793 2794 Value *StartIdx = ConstantInt::get(IdxTy, 0); 2795 2796 // Now, compare the new count to zero. 
If it is zero skip the vector loop and 2797 // jump to the scalar loop. This check also covers the case where the 2798 // backedge-taken count is uint##_max: adding one to it will overflow leading 2799 // to an incorrect trip count of zero. In this (rare) case we will also jump 2800 // to the scalar loop. 2801 emitMinimumIterationCountCheck(Lp, ScalarPH); 2802 2803 // Generate the code to check any assumptions that we've made for SCEV 2804 // expressions. 2805 emitSCEVChecks(Lp, ScalarPH); 2806 2807 // Generate the code that checks in runtime if arrays overlap. We put the 2808 // checks into a separate block to make the more common case of few elements 2809 // faster. 2810 emitMemRuntimeChecks(Lp, ScalarPH); 2811 2812 // Generate the induction variable. 2813 // The loop step is equal to the vectorization factor (num of SIMD elements) 2814 // times the unroll factor (num of SIMD instructions). 2815 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 2816 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 2817 Induction = 2818 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 2819 getDebugLocFromInstOrOperands(OldInduction)); 2820 2821 // We are going to resume the execution of the scalar loop. 2822 // Go over all of the induction variables that we found and fix the 2823 // PHIs that are left in the scalar version of the loop. 2824 // The starting values of PHI nodes depend on the counter of the last 2825 // iteration in the vectorized loop. 2826 // If we come from a bypass edge then we need to start from the original 2827 // start value. 2828 2829 // This variable saves the new starting index for the scalar loop. It is used 2830 // to test if there are any tail iterations left once the vector loop has 2831 // completed. 2832 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 2833 for (auto &InductionEntry : *List) { 2834 PHINode *OrigPhi = InductionEntry.first; 2835 InductionDescriptor II = InductionEntry.second; 2836 2837 // Create phi nodes to merge from the backedge-taken check block. 2838 PHINode *BCResumeVal = PHINode::Create( 2839 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 2840 // Copy original phi DL over to the new one. 2841 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 2842 Value *&EndValue = IVEndValues[OrigPhi]; 2843 if (OrigPhi == OldInduction) { 2844 // We know what the end value is. 2845 EndValue = CountRoundDown; 2846 } else { 2847 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 2848 Type *StepType = II.getStep()->getType(); 2849 Instruction::CastOps CastOp = 2850 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 2851 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 2852 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2853 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 2854 EndValue->setName("ind.end"); 2855 } 2856 2857 // The new PHI merges the original incoming value, in case of a bypass, 2858 // or the value at the end of the vectorized loop. 2859 BCResumeVal->addIncoming(EndValue, MiddleBlock); 2860 2861 // Fix the scalar body counter (PHI node). 2862 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 2863 2864 // The old induction's phi node in the scalar body needs the truncated 2865 // value. 
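    // When control reaches the scalar loop through one of the bypass blocks
    // (the minimum-iteration, SCEV, or memory-overlap checks), no vector
    // iterations have run, so each bypass block feeds the original start
    // value into the resume PHI below.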
2866 for (BasicBlock *BB : LoopBypassBlocks) 2867 BCResumeVal->addIncoming(II.getStartValue(), BB); 2868 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 2869 } 2870 2871 // We need the OrigLoop (scalar loop part) latch terminator to help 2872 // produce correct debug info for the middle block BB instructions. 2873 // The legality check stage guarantees that the loop will have a single 2874 // latch. 2875 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) && 2876 "Scalar loop latch terminator isn't a branch"); 2877 BranchInst *ScalarLatchBr = 2878 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()); 2879 2880 // Add a check in the middle block to see if we have completed 2881 // all of the iterations in the first vector loop. 2882 // If (N - N%VF) == N, then we *don't* need to run the remainder. 2883 // If tail is to be folded, we know we don't need to run the remainder. 2884 Value *CmpN = Builder.getTrue(); 2885 if (!Cost->foldTailByMasking()) { 2886 CmpN = 2887 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 2888 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 2889 2890 // Provide correct stepping behaviour by using the same DebugLoc as the 2891 // scalar loop latch branch cmp if it exists. 2892 if (CmpInst *ScalarLatchCmp = 2893 dyn_cast_or_null<CmpInst>(ScalarLatchBr->getCondition())) 2894 cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchCmp->getDebugLoc()); 2895 } 2896 2897 BranchInst *BrInst = BranchInst::Create(ExitBlock, ScalarPH, CmpN); 2898 BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc()); 2899 ReplaceInstWithInst(MiddleBlock->getTerminator(), BrInst); 2900 2901 // Get ready to start creating new instructions into the vectorized body. 2902 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 2903 2904 // Save the state. 2905 LoopVectorPreHeader = Lp->getLoopPreheader(); 2906 LoopScalarPreHeader = ScalarPH; 2907 LoopMiddleBlock = MiddleBlock; 2908 LoopExitBlock = ExitBlock; 2909 LoopVectorBody = VecBody; 2910 LoopScalarBody = OldBasicBlock; 2911 2912 Optional<MDNode *> VectorizedLoopID = 2913 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 2914 LLVMLoopVectorizeFollowupVectorized}); 2915 if (VectorizedLoopID.hasValue()) { 2916 Lp->setLoopID(VectorizedLoopID.getValue()); 2917 2918 // Do not setAlreadyVectorized if loop attributes have been defined 2919 // explicitly. 2920 return LoopVectorPreHeader; 2921 } 2922 2923 // Keep all loop hints from the original loop on the vector loop (we'll 2924 // replace the vectorizer-specific hints below). 2925 if (MDNode *LID = OrigLoop->getLoopID()) 2926 Lp->setLoopID(LID); 2927 2928 LoopVectorizeHints Hints(Lp, true, *ORE); 2929 Hints.setAlreadyVectorized(); 2930 2931 return LoopVectorPreHeader; 2932 } 2933 2934 // Fix up external users of the induction variable. At this point, we are 2935 // in LCSSA form, with all external PHIs that use the IV having one input value, 2936 // coming from the remainder loop. We need those PHIs to also have a correct 2937 // value for the IV when arriving directly from the middle block. 2938 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 2939 const InductionDescriptor &II, 2940 Value *CountRoundDown, Value *EndValue, 2941 BasicBlock *MiddleBlock) { 2942 // There are two kinds of external IV usages - those that use the value 2943 // computed in the last iteration (the PHI) and those that use the penultimate 2944 // value (the value that feeds into the phi from the loop latch). 2945 // We allow both, but they, obviously, have different values. 
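  // In terms of the code below: external users of the post-increment value
  // (the value coming from the loop latch) receive EndValue, while external
  // users of the PHI itself receive EndValue - Step, recomputed as
  // Start + Step * (CRD - 1).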

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
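  // Instructions are keyed by CSEDenseMapInfo above, so two structurally
  // identical insert/extract/shuffle/GEP instructions hash to the same slot;
  // the later duplicate is replaced with the earlier instruction and erased.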
3034 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3035 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3036 Instruction *In = &*I++; 3037 3038 if (!CSEDenseMapInfo::canHandle(In)) 3039 continue; 3040 3041 // Check if we can replace this instruction with any of the 3042 // visited instructions. 3043 if (Instruction *V = CSEMap.lookup(In)) { 3044 In->replaceAllUsesWith(V); 3045 In->eraseFromParent(); 3046 continue; 3047 } 3048 3049 CSEMap[In] = In; 3050 } 3051 } 3052 3053 /// Estimate the overhead of scalarizing an instruction. This is a 3054 /// convenience wrapper for the type-based getScalarizationOverhead API. 3055 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3056 const TargetTransformInfo &TTI) { 3057 if (VF == 1) 3058 return 0; 3059 3060 unsigned Cost = 0; 3061 Type *RetTy = ToVectorTy(I->getType(), VF); 3062 if (!RetTy->isVoidTy() && 3063 (!isa<LoadInst>(I) || 3064 !TTI.supportsEfficientVectorElementLoadStore())) 3065 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3066 3067 // Some targets keep addresses scalar. 3068 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 3069 return Cost; 3070 3071 if (CallInst *CI = dyn_cast<CallInst>(I)) { 3072 SmallVector<const Value *, 4> Operands(CI->arg_operands()); 3073 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3074 } 3075 else if (!isa<StoreInst>(I) || 3076 !TTI.supportsEfficientVectorElementLoadStore()) { 3077 SmallVector<const Value *, 4> Operands(I->operand_values()); 3078 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3079 } 3080 3081 return Cost; 3082 } 3083 3084 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3085 // Return the cost of the instruction, including scalarization overhead if it's 3086 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3087 // i.e. either vector version isn't available, or is too expensive. 3088 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3089 const TargetTransformInfo &TTI, 3090 const TargetLibraryInfo *TLI, 3091 bool &NeedToScalarize) { 3092 Function *F = CI->getCalledFunction(); 3093 StringRef FnName = CI->getCalledFunction()->getName(); 3094 Type *ScalarRetTy = CI->getType(); 3095 SmallVector<Type *, 4> Tys, ScalarTys; 3096 for (auto &ArgOp : CI->arg_operands()) 3097 ScalarTys.push_back(ArgOp->getType()); 3098 3099 // Estimate cost of scalarized vector call. The source operands are assumed 3100 // to be vectors, so we need to extract individual elements from there, 3101 // execute VF scalar calls, and then gather the result into the vector return 3102 // value. 3103 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3104 if (VF == 1) 3105 return ScalarCallCost; 3106 3107 // Compute corresponding vector type for return value and arguments. 3108 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3109 for (Type *ScalarTy : ScalarTys) 3110 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3111 3112 // Compute costs of unpacking argument values for the scalar calls and 3113 // packing the return values to a vector. 3114 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI); 3115 3116 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3117 3118 // If we can't emit a vector call for this function, then the currently found 3119 // cost is the cost we need to return. 
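  // For illustration, with made-up numbers: for VF = 4, ScalarCallCost = 10
  // and ScalarizationCost = 6, the scalarized cost is 10 * 4 + 6 = 46; if the
  // target library provides a vector variant costing, say, 20, the cheaper
  // vector call is chosen below and NeedToScalarize is cleared.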
3120 NeedToScalarize = true; 3121 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3122 return Cost; 3123 3124 // If the corresponding vector cost is cheaper, return its cost. 3125 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3126 if (VectorCallCost < Cost) { 3127 NeedToScalarize = false; 3128 return VectorCallCost; 3129 } 3130 return Cost; 3131 } 3132 3133 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3134 // factor VF. Return the cost of the instruction, including scalarization 3135 // overhead if it's needed. 3136 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3137 const TargetTransformInfo &TTI, 3138 const TargetLibraryInfo *TLI) { 3139 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3140 assert(ID && "Expected intrinsic call!"); 3141 3142 FastMathFlags FMF; 3143 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3144 FMF = FPMO->getFastMathFlags(); 3145 3146 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3147 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF); 3148 } 3149 3150 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3151 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3152 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3153 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3154 } 3155 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3156 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3157 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3158 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3159 } 3160 3161 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3162 // For every instruction `I` in MinBWs, truncate the operands, create a 3163 // truncated version of `I` and reextend its result. InstCombine runs 3164 // later and will remove any ext/trunc pairs. 3165 SmallPtrSet<Value *, 4> Erased; 3166 for (const auto &KV : Cost->getMinimalBitwidths()) { 3167 // If the value wasn't vectorized, we must maintain the original scalar 3168 // type. The absence of the value from VectorLoopValueMap indicates that it 3169 // wasn't vectorized. 3170 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3171 continue; 3172 for (unsigned Part = 0; Part < UF; ++Part) { 3173 Value *I = getOrCreateVectorValue(KV.first, Part); 3174 if (Erased.find(I) != Erased.end() || I->use_empty() || 3175 !isa<Instruction>(I)) 3176 continue; 3177 Type *OriginalTy = I->getType(); 3178 Type *ScalarTruncatedTy = 3179 IntegerType::get(OriginalTy->getContext(), KV.second); 3180 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3181 OriginalTy->getVectorNumElements()); 3182 if (TruncatedTy == OriginalTy) 3183 continue; 3184 3185 IRBuilder<> B(cast<Instruction>(I)); 3186 auto ShrinkOperand = [&](Value *V) -> Value * { 3187 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3188 if (ZI->getSrcTy() == TruncatedTy) 3189 return ZI->getOperand(0); 3190 return B.CreateZExtOrTrunc(V, TruncatedTy); 3191 }; 3192 3193 // The actual instruction modification depends on the instruction type, 3194 // unfortunately. 3195 Value *NewI = nullptr; 3196 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3197 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3198 ShrinkOperand(BO->getOperand(1))); 3199 3200 // Any wrapping introduced by shrinking this operation shouldn't be 3201 // considered undefined behavior. So, we can't unconditionally copy 3202 // arithmetic wrapping flags to NewI. 
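        // For example, an 'add nsw' that never wrapped at the original width
        // may wrap once it is evaluated in the narrower type, so the nsw/nuw
        // flags are dropped here.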
3203 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3204 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3205 NewI = 3206 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3207 ShrinkOperand(CI->getOperand(1))); 3208 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3209 NewI = B.CreateSelect(SI->getCondition(), 3210 ShrinkOperand(SI->getTrueValue()), 3211 ShrinkOperand(SI->getFalseValue())); 3212 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3213 switch (CI->getOpcode()) { 3214 default: 3215 llvm_unreachable("Unhandled cast!"); 3216 case Instruction::Trunc: 3217 NewI = ShrinkOperand(CI->getOperand(0)); 3218 break; 3219 case Instruction::SExt: 3220 NewI = B.CreateSExtOrTrunc( 3221 CI->getOperand(0), 3222 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3223 break; 3224 case Instruction::ZExt: 3225 NewI = B.CreateZExtOrTrunc( 3226 CI->getOperand(0), 3227 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3228 break; 3229 } 3230 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3231 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3232 auto *O0 = B.CreateZExtOrTrunc( 3233 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3234 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3235 auto *O1 = B.CreateZExtOrTrunc( 3236 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3237 3238 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3239 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3240 // Don't do anything with the operands, just extend the result. 3241 continue; 3242 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3243 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3244 auto *O0 = B.CreateZExtOrTrunc( 3245 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3246 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3247 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3248 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3249 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3250 auto *O0 = B.CreateZExtOrTrunc( 3251 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3252 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3253 } else { 3254 // If we don't know what to do, be conservative and don't do anything. 3255 continue; 3256 } 3257 3258 // Lastly, extend the result. 3259 NewI->takeName(cast<Instruction>(I)); 3260 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3261 I->replaceAllUsesWith(Res); 3262 cast<Instruction>(I)->eraseFromParent(); 3263 Erased.insert(I); 3264 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3265 } 3266 } 3267 3268 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3269 for (const auto &KV : Cost->getMinimalBitwidths()) { 3270 // If the value wasn't vectorized, we must maintain the original scalar 3271 // type. The absence of the value from VectorLoopValueMap indicates that it 3272 // wasn't vectorized. 
3273 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3274 continue; 3275 for (unsigned Part = 0; Part < UF; ++Part) { 3276 Value *I = getOrCreateVectorValue(KV.first, Part); 3277 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3278 if (Inst && Inst->use_empty()) { 3279 Value *NewI = Inst->getOperand(0); 3280 Inst->eraseFromParent(); 3281 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3282 } 3283 } 3284 } 3285 } 3286 3287 void InnerLoopVectorizer::fixVectorizedLoop() { 3288 // Insert truncates and extends for any truncated instructions as hints to 3289 // InstCombine. 3290 if (VF > 1) 3291 truncateToMinimalBitwidths(); 3292 3293 // Fix widened non-induction PHIs by setting up the PHI operands. 3294 if (OrigPHIsToFix.size()) { 3295 assert(EnableVPlanNativePath && 3296 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3297 fixNonInductionPHIs(); 3298 } 3299 3300 // At this point every instruction in the original loop is widened to a 3301 // vector form. Now we need to fix the recurrences in the loop. These PHI 3302 // nodes are currently empty because we did not want to introduce cycles. 3303 // This is the second stage of vectorizing recurrences. 3304 fixCrossIterationPHIs(); 3305 3306 // Update the dominator tree. 3307 // 3308 // FIXME: After creating the structure of the new loop, the dominator tree is 3309 // no longer up-to-date, and it remains that way until we update it 3310 // here. An out-of-date dominator tree is problematic for SCEV, 3311 // because SCEVExpander uses it to guide code generation. The 3312 // vectorizer use SCEVExpanders in several places. Instead, we should 3313 // keep the dominator tree up-to-date as we go. 3314 updateAnalysis(); 3315 3316 // Fix-up external users of the induction variables. 3317 for (auto &Entry : *Legal->getInductionVars()) 3318 fixupIVUsers(Entry.first, Entry.second, 3319 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3320 IVEndValues[Entry.first], LoopMiddleBlock); 3321 3322 fixLCSSAPHIs(); 3323 for (Instruction *PI : PredicatedInstructions) 3324 sinkScalarOperands(&*PI); 3325 3326 // Remove redundant induction instructions. 3327 cse(LoopVectorBody); 3328 } 3329 3330 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3331 // In order to support recurrences we need to be able to vectorize Phi nodes. 3332 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3333 // stage #2: We now need to fix the recurrences by adding incoming edges to 3334 // the currently empty PHI nodes. At this point every instruction in the 3335 // original loop is widened to a vector form so we can use them to construct 3336 // the incoming edges. 3337 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3338 // Handle first-order recurrences and reductions that need to be fixed. 3339 if (Legal->isFirstOrderRecurrence(&Phi)) 3340 fixFirstOrderRecurrence(&Phi); 3341 else if (Legal->isReductionVariable(&Phi)) 3342 fixReduction(&Phi); 3343 } 3344 } 3345 3346 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3347 // This is the second phase of vectorizing first-order recurrences. An 3348 // overview of the transformation is described below. Suppose we have the 3349 // following loop. 3350 // 3351 // for (int i = 0; i < n; ++i) 3352 // b[i] = a[i] - a[i - 1]; 3353 // 3354 // There is a first-order recurrence on "a". 
For this loop, the shorthand
  // scalar IR looks like:
  //
  // scalar.ph:
  //   s_init = a[-1]
  //   br scalar.body
  //
  // scalar.body:
  //   i = phi [0, scalar.ph], [i+1, scalar.body]
  //   s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //   s2 = a[i]
  //   b[i] = s2 - s1
  //   br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  // vector.ph:
  //   v_init = vector(..., ..., ..., a[-1])
  //   br vector.body
  //
  // vector.body:
  //   i = phi [0, vector.ph], [i+4, vector.body]
  //   v1 = phi [v_init, vector.ph], [v2, vector.body]
  //   v2 = a[i, i+1, i+2, i+3];
  //   v3 = vector(v1(3), v2(0, 1, 2))
  //   b[i, i+1, i+2, i+3] = v2 - v3
  //   br cond, vector.body, middle.block
  //
  // middle.block:
  //   x = v2(3)
  //   br scalar.ph
  //
  // scalar.ph:
  //   s_init = phi [x, middle.block], [a[-1], otherwise]
  //   br scalar.body
  //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  Builder.SetInsertPoint(
      cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector or loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value of the last part UF - 1. It appears last
  // among all unrolled iterations, due to the order of their construction.
  Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);

  // Set the insertion point after the previous value if it is an instruction.
  // Note that the previous value may have been constant-folded so it is not
  // guaranteed to be an instruction in the vector loop. Also, if the previous
  // value is a phi node, we should insert after all the phi nodes to avoid
  // breaking basic block verification.
3432 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) || 3433 isa<PHINode>(PreviousLastPart)) 3434 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3435 else 3436 Builder.SetInsertPoint( 3437 &*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart))); 3438 3439 // We will construct a vector for the recurrence by combining the values for 3440 // the current and previous iterations. This is the required shuffle mask. 3441 SmallVector<Constant *, 8> ShuffleMask(VF); 3442 ShuffleMask[0] = Builder.getInt32(VF - 1); 3443 for (unsigned I = 1; I < VF; ++I) 3444 ShuffleMask[I] = Builder.getInt32(I + VF - 1); 3445 3446 // The vector from which to take the initial value for the current iteration 3447 // (actual or unrolled). Initially, this is the vector phi node. 3448 Value *Incoming = VecPhi; 3449 3450 // Shuffle the current and previous vector and update the vector parts. 3451 for (unsigned Part = 0; Part < UF; ++Part) { 3452 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3453 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3454 auto *Shuffle = 3455 VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3456 ConstantVector::get(ShuffleMask)) 3457 : Incoming; 3458 PhiPart->replaceAllUsesWith(Shuffle); 3459 cast<Instruction>(PhiPart)->eraseFromParent(); 3460 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3461 Incoming = PreviousPart; 3462 } 3463 3464 // Fix the latch value of the new recurrence in the vector loop. 3465 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3466 3467 // Extract the last vector element in the middle block. This will be the 3468 // initial value for the recurrence when jumping to the scalar loop. 3469 auto *ExtractForScalar = Incoming; 3470 if (VF > 1) { 3471 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3472 ExtractForScalar = Builder.CreateExtractElement( 3473 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 3474 } 3475 // Extract the second last element in the middle block if the 3476 // Phi is used outside the loop. We need to extract the phi itself 3477 // and not the last element (the phi update in the current iteration). This 3478 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3479 // when the scalar loop is not run at all. 3480 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3481 if (VF > 1) 3482 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3483 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 3484 // When loop is unrolled without vectorizing, initialize 3485 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 3486 // `Incoming`. This is analogous to the vectorized case above: extracting the 3487 // second last element when VF > 1. 3488 else if (UF > 1) 3489 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 3490 3491 // Fix the initial value of the original recurrence in the scalar loop. 3492 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3493 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3494 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3495 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3496 Start->addIncoming(Incoming, BB); 3497 } 3498 3499 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 3500 Phi->setName("scalar.recur"); 3501 3502 // Finally, fix users of the recurrence outside the loop. 
The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find all the phi nodes for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getIncomingValue(0) == Phi) {
      LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  Constant *Zero = Builder.getInt32(0);

  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(Phi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

  RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
      RdxDesc.getMinMaxRecurrenceKind();
  setDebugLocFromInst(Builder, ReductionStartValue);

  // We need to generate a reduction vector from the incoming scalar.
  // To do so, we need to generate the 'identity' vector and override
  // one of the elements with the incoming scalar reduction. We need
  // to do it in the vector-loop preheader.
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();

  // Find the reduction identity variable. Zero for addition, OR and XOR;
  // one for multiplication; -1 for AND.
  Value *Identity;
  Value *VectorStart;
  if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
      RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
    if (VF == 1) {
      VectorStart = Identity = ReductionStartValue;
    } else {
      VectorStart = Identity =
          Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
    }
  } else {
    // Handle other reduction kinds:
    Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
        RK, VecTy->getScalarType());
    if (VF == 1) {
      Identity = Iden;
      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart = ReductionStartValue;
    } else {
      Identity = ConstantVector::getSplat(VF, Iden);

      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart =
          Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
    }
  }

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop-invariant value.
  BasicBlock *Latch = OrigLoop->getLoopLatch();
  Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
    Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ?
VectorStart : Identity; 3582 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader); 3583 cast<PHINode>(VecRdxPhi) 3584 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3585 } 3586 3587 // Before each round, move the insertion point right between 3588 // the PHIs and the values we are going to write. 3589 // This allows us to write both PHINodes and the extractelement 3590 // instructions. 3591 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3592 3593 setDebugLocFromInst(Builder, LoopExitInst); 3594 3595 // If the vector reduction can be performed in a smaller type, we truncate 3596 // then extend the loop exit value to enable InstCombine to evaluate the 3597 // entire expression in the smaller type. 3598 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3599 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3600 Builder.SetInsertPoint( 3601 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3602 VectorParts RdxParts(UF); 3603 for (unsigned Part = 0; Part < UF; ++Part) { 3604 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3605 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3606 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3607 : Builder.CreateZExt(Trunc, VecTy); 3608 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3609 UI != RdxParts[Part]->user_end();) 3610 if (*UI != Trunc) { 3611 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3612 RdxParts[Part] = Extnd; 3613 } else { 3614 ++UI; 3615 } 3616 } 3617 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3618 for (unsigned Part = 0; Part < UF; ++Part) { 3619 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3620 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3621 } 3622 } 3623 3624 // Reduce all of the unrolled parts into a single vector. 3625 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3626 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3627 setDebugLocFromInst(Builder, ReducedPartRdx); 3628 for (unsigned Part = 1; Part < UF; ++Part) { 3629 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3630 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3631 // Floating point operations had to be 'fast' to enable the reduction. 3632 ReducedPartRdx = addFastMathFlag( 3633 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3634 ReducedPartRdx, "bin.rdx"), 3635 RdxDesc.getFastMathFlags()); 3636 else 3637 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3638 RdxPart); 3639 } 3640 3641 if (VF > 1) { 3642 bool NoNaN = Legal->hasFunNoNaNAttr(); 3643 ReducedPartRdx = 3644 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3645 // If the reduction can be performed in a smaller type, we need to extend 3646 // the reduction to the wider type before we branch to the original loop. 3647 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3648 ReducedPartRdx = 3649 RdxDesc.isSigned() 3650 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3651 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3652 } 3653 3654 // Create a phi node that merges control-flow from the backedge-taken check 3655 // block and the middle block. 
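  // The merged value is the original start value when arriving from any of
  // the bypass blocks and the reduced vector result when arriving from the
  // middle block.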
3656 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3657 LoopScalarPreHeader->getTerminator()); 3658 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3659 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3660 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3661 3662 // Now, we need to fix the users of the reduction variable 3663 // inside and outside of the scalar remainder loop. 3664 // We know that the loop is in LCSSA form. We need to update the 3665 // PHI nodes in the exit blocks. 3666 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3667 // All PHINodes need to have a single entry edge, or two if 3668 // we already fixed them. 3669 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3670 3671 // We found a reduction value exit-PHI. Update it with the 3672 // incoming bypass edge. 3673 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3674 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3675 } // end of the LCSSA phi scan. 3676 3677 // Fix the scalar loop reduction variable with the incoming reduction sum 3678 // from the vector body and from the backedge value. 3679 int IncomingEdgeBlockIdx = 3680 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3681 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3682 // Pick the other block. 3683 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3684 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3685 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3686 } 3687 3688 void InnerLoopVectorizer::fixLCSSAPHIs() { 3689 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3690 if (LCSSAPhi.getNumIncomingValues() == 1) { 3691 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3692 // Non-instruction incoming values will have only one value. 3693 unsigned LastLane = 0; 3694 if (isa<Instruction>(IncomingValue)) 3695 LastLane = Cost->isUniformAfterVectorization( 3696 cast<Instruction>(IncomingValue), VF) 3697 ? 0 3698 : VF - 1; 3699 // Can be a loop invariant incoming value or the last scalar value to be 3700 // extracted from the vectorized loop. 3701 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3702 Value *lastIncomingValue = 3703 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 3704 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 3705 } 3706 } 3707 } 3708 3709 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3710 // The basic block and loop containing the predicated instruction. 3711 auto *PredBB = PredInst->getParent(); 3712 auto *VectorLoop = LI->getLoopFor(PredBB); 3713 3714 // Initialize a worklist with the operands of the predicated instruction. 3715 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3716 3717 // Holds instructions that we need to analyze again. An instruction may be 3718 // reanalyzed if we don't yet know if we can sink it or not. 3719 SmallVector<Instruction *, 8> InstsToReanalyze; 3720 3721 // Returns true if a given use occurs in the predicated block. Phi nodes use 3722 // their operands in their corresponding predecessor blocks. 
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends when a pass
  // through the worklist fails to sink any instructions.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::fixNonInductionPHIs() {
  for (PHINode *OrigPhi : OrigPHIsToFix) {
    PHINode *NewPhi =
        cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0));
    unsigned NumIncomingValues = OrigPhi->getNumIncomingValues();

    SmallVector<BasicBlock *, 2> ScalarBBPredecessors(
        predecessors(OrigPhi->getParent()));
    SmallVector<BasicBlock *, 2> VectorBBPredecessors(
        predecessors(NewPhi->getParent()));
    assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() &&
           "Scalar and Vector BB should have the same number of predecessors");

    // The insertion point in Builder may be invalidated by the time we get
    // here. Force the Builder insertion point to something valid so that we do
    // not run into issues during insertion point restore in
    // getOrCreateVectorValue calls below.
    Builder.SetInsertPoint(NewPhi);

    // The predecessor order is preserved and we can rely on mapping between
    // scalar and vector block predecessors.
    for (unsigned i = 0; i < NumIncomingValues; ++i) {
      BasicBlock *NewPredBB = VectorBBPredecessors[i];

      // When looking up the new scalar/vector values to fix up, use incoming
      // values from the original phi.
3799 Value *ScIncV = 3800 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 3801 3802 // Scalar incoming value may need a broadcast 3803 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 3804 NewPhi->addIncoming(NewIncV, NewPredBB); 3805 } 3806 } 3807 } 3808 3809 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 3810 unsigned VF) { 3811 PHINode *P = cast<PHINode>(PN); 3812 if (EnableVPlanNativePath) { 3813 // Currently we enter here in the VPlan-native path for non-induction 3814 // PHIs where all control flow is uniform. We simply widen these PHIs. 3815 // Create a vector phi with no operands - the vector phi operands will be 3816 // set at the end of vector code generation. 3817 Type *VecTy = 3818 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3819 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 3820 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 3821 OrigPHIsToFix.push_back(P); 3822 3823 return; 3824 } 3825 3826 assert(PN->getParent() == OrigLoop->getHeader() && 3827 "Non-header phis should have been handled elsewhere"); 3828 3829 // In order to support recurrences we need to be able to vectorize Phi nodes. 3830 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3831 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 3832 // this value when we vectorize all of the instructions that use the PHI. 3833 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 3834 for (unsigned Part = 0; Part < UF; ++Part) { 3835 // This is phase one of vectorizing PHIs. 3836 Type *VecTy = 3837 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 3838 Value *EntryPart = PHINode::Create( 3839 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 3840 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 3841 } 3842 return; 3843 } 3844 3845 setDebugLocFromInst(Builder, P); 3846 3847 // This PHINode must be an induction variable. 3848 // Make sure that we know about it. 3849 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 3850 3851 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 3852 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3853 3854 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 3855 // which can be found from the original scalar operations. 3856 switch (II.getKind()) { 3857 case InductionDescriptor::IK_NoInduction: 3858 llvm_unreachable("Unknown induction"); 3859 case InductionDescriptor::IK_IntInduction: 3860 case InductionDescriptor::IK_FpInduction: 3861 llvm_unreachable("Integer/fp induction is handled elsewhere."); 3862 case InductionDescriptor::IK_PtrInduction: { 3863 // Handle the pointer induction variable case. 3864 assert(P->getType()->isPointerTy() && "Unexpected type."); 3865 // This is the normalized GEP that starts counting at zero. 3866 Value *PtrInd = Induction; 3867 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 3868 // Determine the number of scalars we need to generate for each unroll 3869 // iteration. If the instruction is uniform, we only need to generate the 3870 // first lane. Otherwise, we generate all VF values. 3871 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 3872 // These are the scalar results. Notice that we don't generate vector GEPs 3873 // because scalar GEPs result in better code. 
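    // For each unroll part and lane, the scalar pointer below is derived from
    // the normalized index PtrInd + (Lane + Part * VF), passed through the
    // induction descriptor by emitTransformedIndex.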
3874 for (unsigned Part = 0; Part < UF; ++Part) { 3875 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 3876 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 3877 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 3878 Value *SclrGep = 3879 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 3880 SclrGep->setName("next.gep"); 3881 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 3882 } 3883 } 3884 return; 3885 } 3886 } 3887 } 3888 3889 /// A helper function for checking whether an integer division-related 3890 /// instruction may divide by zero (in which case it must be predicated if 3891 /// executed conditionally in the scalar code). 3892 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 3893 /// Non-zero divisors that are non compile-time constants will not be 3894 /// converted into multiplication, so we will still end up scalarizing 3895 /// the division, but can do so w/o predication. 3896 static bool mayDivideByZero(Instruction &I) { 3897 assert((I.getOpcode() == Instruction::UDiv || 3898 I.getOpcode() == Instruction::SDiv || 3899 I.getOpcode() == Instruction::URem || 3900 I.getOpcode() == Instruction::SRem) && 3901 "Unexpected instruction"); 3902 Value *Divisor = I.getOperand(1); 3903 auto *CInt = dyn_cast<ConstantInt>(Divisor); 3904 return !CInt || CInt->isZero(); 3905 } 3906 3907 void InnerLoopVectorizer::widenInstruction(Instruction &I) { 3908 switch (I.getOpcode()) { 3909 case Instruction::Br: 3910 case Instruction::PHI: 3911 llvm_unreachable("This instruction is handled by a different recipe."); 3912 case Instruction::GetElementPtr: { 3913 // Construct a vector GEP by widening the operands of the scalar GEP as 3914 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 3915 // results in a vector of pointers when at least one operand of the GEP 3916 // is vector-typed. Thus, to keep the representation compact, we only use 3917 // vector-typed operands for loop-varying values. 3918 auto *GEP = cast<GetElementPtrInst>(&I); 3919 3920 if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) { 3921 // If we are vectorizing, but the GEP has only loop-invariant operands, 3922 // the GEP we build (by only using vector-typed operands for 3923 // loop-varying values) would be a scalar pointer. Thus, to ensure we 3924 // produce a vector of pointers, we need to either arbitrarily pick an 3925 // operand to broadcast, or broadcast a clone of the original GEP. 3926 // Here, we broadcast a clone of the original. 3927 // 3928 // TODO: If at some point we decide to scalarize instructions having 3929 // loop-invariant operands, this special case will no longer be 3930 // required. We would add the scalarization decision to 3931 // collectLoopScalars() and teach getVectorValue() to broadcast 3932 // the lane-zero scalar value. 3933 auto *Clone = Builder.Insert(GEP->clone()); 3934 for (unsigned Part = 0; Part < UF; ++Part) { 3935 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 3936 VectorLoopValueMap.setVectorValue(&I, Part, EntryPart); 3937 addMetadata(EntryPart, GEP); 3938 } 3939 } else { 3940 // If the GEP has at least one loop-varying operand, we are sure to 3941 // produce a vector of pointers. But if we are only unrolling, we want 3942 // to produce a scalar GEP for each unroll part. Thus, the GEP we 3943 // produce with the code below will be scalar (if VF == 1) or vector 3944 // (otherwise). 
Note that for the unroll-only case, we still maintain 3945 // values in the vector mapping with initVector, as we do for other 3946 // instructions. 3947 for (unsigned Part = 0; Part < UF; ++Part) { 3948 // The pointer operand of the new GEP. If it's loop-invariant, we 3949 // won't broadcast it. 3950 auto *Ptr = 3951 OrigLoop->isLoopInvariant(GEP->getPointerOperand()) 3952 ? GEP->getPointerOperand() 3953 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 3954 3955 // Collect all the indices for the new GEP. If any index is 3956 // loop-invariant, we won't broadcast it. 3957 SmallVector<Value *, 4> Indices; 3958 for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) { 3959 if (OrigLoop->isLoopInvariant(U.get())) 3960 Indices.push_back(U.get()); 3961 else 3962 Indices.push_back(getOrCreateVectorValue(U.get(), Part)); 3963 } 3964 3965 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 3966 // but it should be a vector, otherwise. 3967 auto *NewGEP = 3968 GEP->isInBounds() 3969 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 3970 Indices) 3971 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 3972 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 3973 "NewGEP is not a pointer vector"); 3974 VectorLoopValueMap.setVectorValue(&I, Part, NewGEP); 3975 addMetadata(NewGEP, GEP); 3976 } 3977 } 3978 3979 break; 3980 } 3981 case Instruction::UDiv: 3982 case Instruction::SDiv: 3983 case Instruction::SRem: 3984 case Instruction::URem: 3985 case Instruction::Add: 3986 case Instruction::FAdd: 3987 case Instruction::Sub: 3988 case Instruction::FSub: 3989 case Instruction::Mul: 3990 case Instruction::FMul: 3991 case Instruction::FDiv: 3992 case Instruction::FRem: 3993 case Instruction::Shl: 3994 case Instruction::LShr: 3995 case Instruction::AShr: 3996 case Instruction::And: 3997 case Instruction::Or: 3998 case Instruction::Xor: { 3999 // Just widen binops. 4000 auto *BinOp = cast<BinaryOperator>(&I); 4001 setDebugLocFromInst(Builder, BinOp); 4002 4003 for (unsigned Part = 0; Part < UF; ++Part) { 4004 Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part); 4005 Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part); 4006 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); 4007 4008 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4009 VecOp->copyIRFlags(BinOp); 4010 4011 // Use this vector value for all users of the original instruction. 4012 VectorLoopValueMap.setVectorValue(&I, Part, V); 4013 addMetadata(V, BinOp); 4014 } 4015 4016 break; 4017 } 4018 case Instruction::Select: { 4019 // Widen selects. 4020 // If the selector is loop invariant we can create a select 4021 // instruction with a scalar condition. Otherwise, use vector-select. 4022 auto *SE = PSE.getSE(); 4023 bool InvariantCond = 4024 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4025 setDebugLocFromInst(Builder, &I); 4026 4027 // The condition can be loop invariant but still defined inside the 4028 // loop. This means that we can't just use the original 'cond' value. 4029 // We have to take the 'vectorized' value and pick the first lane. 4030 // Instcombine will make this a no-op. 
4031 4032 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 4033 4034 for (unsigned Part = 0; Part < UF; ++Part) { 4035 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4036 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4037 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4038 Value *Sel = 4039 Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1); 4040 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4041 addMetadata(Sel, &I); 4042 } 4043 4044 break; 4045 } 4046 4047 case Instruction::ICmp: 4048 case Instruction::FCmp: { 4049 // Widen compares. Generate vector compares. 4050 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4051 auto *Cmp = dyn_cast<CmpInst>(&I); 4052 setDebugLocFromInst(Builder, Cmp); 4053 for (unsigned Part = 0; Part < UF; ++Part) { 4054 Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part); 4055 Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part); 4056 Value *C = nullptr; 4057 if (FCmp) { 4058 // Propagate fast math flags. 4059 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4060 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4061 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4062 } else { 4063 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4064 } 4065 VectorLoopValueMap.setVectorValue(&I, Part, C); 4066 addMetadata(C, &I); 4067 } 4068 4069 break; 4070 } 4071 4072 case Instruction::ZExt: 4073 case Instruction::SExt: 4074 case Instruction::FPToUI: 4075 case Instruction::FPToSI: 4076 case Instruction::FPExt: 4077 case Instruction::PtrToInt: 4078 case Instruction::IntToPtr: 4079 case Instruction::SIToFP: 4080 case Instruction::UIToFP: 4081 case Instruction::Trunc: 4082 case Instruction::FPTrunc: 4083 case Instruction::BitCast: { 4084 auto *CI = dyn_cast<CastInst>(&I); 4085 setDebugLocFromInst(Builder, CI); 4086 4087 /// Vectorize casts. 4088 Type *DestTy = 4089 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF); 4090 4091 for (unsigned Part = 0; Part < UF; ++Part) { 4092 Value *A = getOrCreateVectorValue(CI->getOperand(0), Part); 4093 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4094 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4095 addMetadata(Cast, &I); 4096 } 4097 break; 4098 } 4099 4100 case Instruction::Call: { 4101 // Ignore dbg intrinsics. 4102 if (isa<DbgInfoIntrinsic>(I)) 4103 break; 4104 setDebugLocFromInst(Builder, &I); 4105 4106 Module *M = I.getParent()->getParent()->getParent(); 4107 auto *CI = cast<CallInst>(&I); 4108 4109 StringRef FnName = CI->getCalledFunction()->getName(); 4110 Function *F = CI->getCalledFunction(); 4111 Type *RetTy = ToVectorTy(CI->getType(), VF); 4112 SmallVector<Type *, 4> Tys; 4113 for (Value *ArgOperand : CI->arg_operands()) 4114 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4115 4116 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4117 4118 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4119 // version of the instruction. 4120 // Is it beneficial to perform intrinsic call compared to lib call? 
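    // For example (illustrative, VF = 4): a call to sqrtf may be widened either
    // to the @llvm.sqrt.v4f32 intrinsic (when ID is set and the intrinsic cost
    // is no higher) or to a vector library routine obtained from
    // TLI->getVectorizedFunction below, whichever the cost queries favor.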
4121 bool NeedToScalarize; 4122 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4123 bool UseVectorIntrinsic = 4124 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4125 assert((UseVectorIntrinsic || !NeedToScalarize) && 4126 "Instruction should be scalarized elsewhere."); 4127 4128 for (unsigned Part = 0; Part < UF; ++Part) { 4129 SmallVector<Value *, 4> Args; 4130 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4131 Value *Arg = CI->getArgOperand(i); 4132 // Some intrinsics have a scalar argument - don't replace it with a 4133 // vector. 4134 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) 4135 Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part); 4136 Args.push_back(Arg); 4137 } 4138 4139 Function *VectorF; 4140 if (UseVectorIntrinsic) { 4141 // Use vector version of the intrinsic. 4142 Type *TysForDecl[] = {CI->getType()}; 4143 if (VF > 1) 4144 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4145 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4146 } else { 4147 // Use vector version of the library call. 4148 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4149 assert(!VFnName.empty() && "Vector function name is empty."); 4150 VectorF = M->getFunction(VFnName); 4151 if (!VectorF) { 4152 // Generate a declaration 4153 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4154 VectorF = 4155 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4156 VectorF->copyAttributesFrom(F); 4157 } 4158 } 4159 assert(VectorF && "Can't create vector function."); 4160 4161 SmallVector<OperandBundleDef, 1> OpBundles; 4162 CI->getOperandBundlesAsDefs(OpBundles); 4163 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4164 4165 if (isa<FPMathOperator>(V)) 4166 V->copyFastMathFlags(CI); 4167 4168 VectorLoopValueMap.setVectorValue(&I, Part, V); 4169 addMetadata(V, &I); 4170 } 4171 4172 break; 4173 } 4174 4175 default: 4176 // This instruction is not vectorized by simple widening. 4177 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4178 llvm_unreachable("Unhandled instruction!"); 4179 } // end of switch. 4180 } 4181 4182 void InnerLoopVectorizer::updateAnalysis() { 4183 // Forget the original basic block. 4184 PSE.getSE()->forgetLoop(OrigLoop); 4185 4186 // DT is not kept up-to-date for outer loop vectorization 4187 if (EnableVPlanNativePath) 4188 return; 4189 4190 // Update the dominator tree information. 4191 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4192 "Entry does not dominate exit."); 4193 4194 DT->addNewBlock(LoopMiddleBlock, 4195 LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4196 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4197 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4198 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4199 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 4200 } 4201 4202 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4203 // We should not collect Scalars more than once per VF. Right now, this 4204 // function is called from collectUniformsAndScalars(), which already does 4205 // this check. Collecting Scalars for VF=1 does not make any sense. 
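  // As an illustration of what ends up in Scalars (hypothetical IR): with
  // VF = 4, a pointer induction such as
  //   %p = phi i8* [ %start, %preheader ], [ %p.next, %latch ]
  // is currently emitted as VF scalar pointers per unrolled part rather than a
  // vector of pointers, so %p and %p.next are collected below.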
  assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use
  // will be a scalar use, and the pointer is only used by memory accesses, we
  // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast and
  // getelementptr instructions used by memory accesses requiring a scalar use,
  // and (3) pointer induction variables and their update instructions (we
  // currently only scalarize these).
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use.
The pointer operands of loads and 4279 // stores will be scalar as long as the memory accesses is not a gather or 4280 // scatter operation. The value operand of a store will remain scalar if the 4281 // store is scalarized. 4282 for (auto *BB : TheLoop->blocks()) 4283 for (auto &I : *BB) { 4284 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4285 evaluatePtrUse(Load, Load->getPointerOperand()); 4286 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4287 evaluatePtrUse(Store, Store->getPointerOperand()); 4288 evaluatePtrUse(Store, Store->getValueOperand()); 4289 } 4290 } 4291 for (auto *I : ScalarPtrs) 4292 if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) { 4293 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4294 Worklist.insert(I); 4295 } 4296 4297 // (3) Add to the worklist all pointer induction variables and their update 4298 // instructions. 4299 // 4300 // TODO: Once we are able to vectorize pointer induction variables we should 4301 // no longer insert them into the worklist here. 4302 auto *Latch = TheLoop->getLoopLatch(); 4303 for (auto &Induction : *Legal->getInductionVars()) { 4304 auto *Ind = Induction.first; 4305 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4306 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4307 continue; 4308 Worklist.insert(Ind); 4309 Worklist.insert(IndUpdate); 4310 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4311 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4312 << "\n"); 4313 } 4314 4315 // Insert the forced scalars. 4316 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4317 // induction variable when the PHI user is scalarized. 4318 auto ForcedScalar = ForcedScalars.find(VF); 4319 if (ForcedScalar != ForcedScalars.end()) 4320 for (auto *I : ForcedScalar->second) 4321 Worklist.insert(I); 4322 4323 // Expand the worklist by looking through any bitcasts and getelementptr 4324 // instructions we've already identified as scalar. This is similar to the 4325 // expansion step in collectLoopUniforms(); however, here we're only 4326 // expanding to include additional bitcasts and getelementptr instructions. 4327 unsigned Idx = 0; 4328 while (Idx != Worklist.size()) { 4329 Instruction *Dst = Worklist[Idx++]; 4330 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4331 continue; 4332 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4333 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4334 auto *J = cast<Instruction>(U); 4335 return !TheLoop->contains(J) || Worklist.count(J) || 4336 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4337 isScalarUse(J, Src)); 4338 })) { 4339 Worklist.insert(Src); 4340 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4341 } 4342 } 4343 4344 // An induction variable will remain scalar if all users of the induction 4345 // variable and induction variable update remain scalar. 4346 for (auto &Induction : *Legal->getInductionVars()) { 4347 auto *Ind = Induction.first; 4348 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4349 4350 // We already considered pointer induction variables, so there's no reason 4351 // to look at their users again. 4352 // 4353 // TODO: Once we are able to vectorize pointer induction variables we 4354 // should no longer skip over them here. 
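    // For the non-pointer inductions handled below, an illustrative case: an
    // integer induction %i used only by its update %i.next and by instructions
    // already in the worklist (e.g. a scalar GEP or the uniform latch compare)
    // passes both checks and is kept scalar together with %i.next; if %i also
    // feeds a widened instruction, it is not added.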
4355 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4356 continue; 4357 4358 // Determine if all users of the induction variable are scalar after 4359 // vectorization. 4360 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4361 auto *I = cast<Instruction>(U); 4362 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4363 }); 4364 if (!ScalarInd) 4365 continue; 4366 4367 // Determine if all users of the induction variable update instruction are 4368 // scalar after vectorization. 4369 auto ScalarIndUpdate = 4370 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4371 auto *I = cast<Instruction>(U); 4372 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4373 }); 4374 if (!ScalarIndUpdate) 4375 continue; 4376 4377 // The induction variable and its update instruction will remain scalar. 4378 Worklist.insert(Ind); 4379 Worklist.insert(IndUpdate); 4380 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4381 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4382 << "\n"); 4383 } 4384 4385 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4386 } 4387 4388 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4389 if (!blockNeedsPredication(I->getParent())) 4390 return false; 4391 switch(I->getOpcode()) { 4392 default: 4393 break; 4394 case Instruction::Load: 4395 case Instruction::Store: { 4396 if (!Legal->isMaskRequired(I)) 4397 return false; 4398 auto *Ptr = getLoadStorePointerOperand(I); 4399 auto *Ty = getMemInstValueType(I); 4400 // We have already decided how to vectorize this instruction, get that 4401 // result. 4402 if (VF > 1) { 4403 InstWidening WideningDecision = getWideningDecision(I, VF); 4404 assert(WideningDecision != CM_Unknown && 4405 "Widening decision should be ready at this moment"); 4406 return WideningDecision == CM_Scalarize; 4407 } 4408 return isa<LoadInst>(I) ? 4409 !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty)) 4410 : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty)); 4411 } 4412 case Instruction::UDiv: 4413 case Instruction::SDiv: 4414 case Instruction::SRem: 4415 case Instruction::URem: 4416 return mayDivideByZero(*I); 4417 } 4418 return false; 4419 } 4420 4421 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4422 unsigned VF) { 4423 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4424 assert(getWideningDecision(I, VF) == CM_Unknown && 4425 "Decision should not be set yet."); 4426 auto *Group = getInterleavedAccessGroup(I); 4427 assert(Group && "Must have a group."); 4428 4429 // Check if masking is required. 4430 // A Group may need masking for one of two reasons: it resides in a block that 4431 // needs predication, or it was decided to use masking to deal with gaps. 4432 bool PredicatedAccessRequiresMasking = 4433 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4434 bool AccessWithGapsRequiresMasking = 4435 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 4436 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4437 return true; 4438 4439 // If masked interleaving is required, we expect that the user/target had 4440 // enabled it, because otherwise it either wouldn't have been created or 4441 // it should have been invalidated by the CostModel. 
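  // For example (illustrative): a load group that accesses only members 0 and
  // 1 of a 3-member interleaved layout has a gap, so its wide load may touch
  // memory the scalar loop never accesses; such a group reports
  // requiresScalarEpilogue(), and when no scalar epilogue is allowed it can
  // only be widened with a masked load.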
4442 assert(useMaskedInterleavedAccesses(TTI) && 4443 "Masked interleave-groups for predicated accesses are not enabled."); 4444 4445 auto *Ty = getMemInstValueType(I); 4446 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty) 4447 : TTI.isLegalMaskedStore(Ty); 4448 } 4449 4450 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4451 unsigned VF) { 4452 // Get and ensure we have a valid memory instruction. 4453 LoadInst *LI = dyn_cast<LoadInst>(I); 4454 StoreInst *SI = dyn_cast<StoreInst>(I); 4455 assert((LI || SI) && "Invalid memory instruction"); 4456 4457 auto *Ptr = getLoadStorePointerOperand(I); 4458 4459 // In order to be widened, the pointer should be consecutive, first of all. 4460 if (!Legal->isConsecutivePtr(Ptr)) 4461 return false; 4462 4463 // If the instruction is a store located in a predicated block, it will be 4464 // scalarized. 4465 if (isScalarWithPredication(I)) 4466 return false; 4467 4468 // If the instruction's allocated size doesn't equal it's type size, it 4469 // requires padding and will be scalarized. 4470 auto &DL = I->getModule()->getDataLayout(); 4471 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4472 if (hasIrregularType(ScalarTy, DL, VF)) 4473 return false; 4474 4475 return true; 4476 } 4477 4478 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4479 // We should not collect Uniforms more than once per VF. Right now, 4480 // this function is called from collectUniformsAndScalars(), which 4481 // already does this check. Collecting Uniforms for VF=1 does not make any 4482 // sense. 4483 4484 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4485 "This function should not be visited twice for the same VF"); 4486 4487 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4488 // not analyze again. Uniforms.count(VF) will return 1. 4489 Uniforms[VF].clear(); 4490 4491 // We now know that the loop is vectorizable! 4492 // Collect instructions inside the loop that will remain uniform after 4493 // vectorization. 4494 4495 // Global values, params and instructions outside of current loop are out of 4496 // scope. 4497 auto isOutOfScope = [&](Value *V) -> bool { 4498 Instruction *I = dyn_cast<Instruction>(V); 4499 return (!I || !TheLoop->contains(I)); 4500 }; 4501 4502 SetVector<Instruction *> Worklist; 4503 BasicBlock *Latch = TheLoop->getLoopLatch(); 4504 4505 // Start with the conditional branch. If the branch condition is an 4506 // instruction contained in the loop that is only used by the branch, it is 4507 // uniform. 4508 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4509 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 4510 Worklist.insert(Cmp); 4511 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 4512 } 4513 4514 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4515 // are pointers that are treated like consecutive pointers during 4516 // vectorization. The pointer operands of interleaved accesses are an 4517 // example. 4518 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4519 4520 // Holds pointer operands of instructions that are possibly non-uniform. 
4521 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4522 4523 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4524 InstWidening WideningDecision = getWideningDecision(I, VF); 4525 assert(WideningDecision != CM_Unknown && 4526 "Widening decision should be ready at this moment"); 4527 4528 return (WideningDecision == CM_Widen || 4529 WideningDecision == CM_Widen_Reverse || 4530 WideningDecision == CM_Interleave); 4531 }; 4532 // Iterate over the instructions in the loop, and collect all 4533 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4534 // that a consecutive-like pointer operand will be scalarized, we collect it 4535 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4536 // getelementptr instruction can be used by both vectorized and scalarized 4537 // memory instructions. For example, if a loop loads and stores from the same 4538 // location, but the store is conditional, the store will be scalarized, and 4539 // the getelementptr won't remain uniform. 4540 for (auto *BB : TheLoop->blocks()) 4541 for (auto &I : *BB) { 4542 // If there's no pointer operand, there's nothing to do. 4543 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4544 if (!Ptr) 4545 continue; 4546 4547 // True if all users of Ptr are memory accesses that have Ptr as their 4548 // pointer operand. 4549 auto UsersAreMemAccesses = 4550 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4551 return getLoadStorePointerOperand(U) == Ptr; 4552 }); 4553 4554 // Ensure the memory instruction will not be scalarized or used by 4555 // gather/scatter, making its pointer operand non-uniform. If the pointer 4556 // operand is used by any instruction other than a memory access, we 4557 // conservatively assume the pointer operand may be non-uniform. 4558 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4559 PossibleNonUniformPtrs.insert(Ptr); 4560 4561 // If the memory instruction will be vectorized and its pointer operand 4562 // is consecutive-like, or interleaving - the pointer operand should 4563 // remain uniform. 4564 else 4565 ConsecutiveLikePtrs.insert(Ptr); 4566 } 4567 4568 // Add to the Worklist all consecutive and consecutive-like pointers that 4569 // aren't also identified as possibly non-uniform. 4570 for (auto *V : ConsecutiveLikePtrs) 4571 if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) { 4572 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n"); 4573 Worklist.insert(V); 4574 } 4575 4576 // Expand Worklist in topological order: whenever a new instruction 4577 // is added , its users should be already inside Worklist. It ensures 4578 // a uniform instruction will only be used by uniform instructions. 4579 unsigned idx = 0; 4580 while (idx != Worklist.size()) { 4581 Instruction *I = Worklist[idx++]; 4582 4583 for (auto OV : I->operand_values()) { 4584 // isOutOfScope operands cannot be uniform instructions. 4585 if (isOutOfScope(OV)) 4586 continue; 4587 // First order recurrence Phi's should typically be considered 4588 // non-uniform. 4589 auto *OP = dyn_cast<PHINode>(OV); 4590 if (OP && Legal->isFirstOrderRecurrence(OP)) 4591 continue; 4592 // If all the users of the operand are uniform, then add the 4593 // operand into the uniform worklist. 
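      // For example (illustrative): if a uniform GEP's pointer operand is a
      // bitcast used only by that GEP, every user of the bitcast is already in
      // the worklist, so the bitcast is added as uniform as well.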
4594 auto *OI = cast<Instruction>(OV); 4595 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4596 auto *J = cast<Instruction>(U); 4597 return Worklist.count(J) || 4598 (OI == getLoadStorePointerOperand(J) && 4599 isUniformDecision(J, VF)); 4600 })) { 4601 Worklist.insert(OI); 4602 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n"); 4603 } 4604 } 4605 } 4606 4607 // Returns true if Ptr is the pointer operand of a memory access instruction 4608 // I, and I is known to not require scalarization. 4609 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4610 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4611 }; 4612 4613 // For an instruction to be added into Worklist above, all its users inside 4614 // the loop should also be in Worklist. However, this condition cannot be 4615 // true for phi nodes that form a cyclic dependence. We must process phi 4616 // nodes separately. An induction variable will remain uniform if all users 4617 // of the induction variable and induction variable update remain uniform. 4618 // The code below handles both pointer and non-pointer induction variables. 4619 for (auto &Induction : *Legal->getInductionVars()) { 4620 auto *Ind = Induction.first; 4621 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4622 4623 // Determine if all users of the induction variable are uniform after 4624 // vectorization. 4625 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4626 auto *I = cast<Instruction>(U); 4627 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4628 isVectorizedMemAccessUse(I, Ind); 4629 }); 4630 if (!UniformInd) 4631 continue; 4632 4633 // Determine if all users of the induction variable update instruction are 4634 // uniform after vectorization. 4635 auto UniformIndUpdate = 4636 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4637 auto *I = cast<Instruction>(U); 4638 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4639 isVectorizedMemAccessUse(I, IndUpdate); 4640 }); 4641 if (!UniformIndUpdate) 4642 continue; 4643 4644 // The induction variable and its update instruction will remain uniform. 4645 Worklist.insert(Ind); 4646 Worklist.insert(IndUpdate); 4647 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 4648 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate 4649 << "\n"); 4650 } 4651 4652 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4653 } 4654 4655 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) { 4656 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4657 // TODO: It may by useful to do since it's still likely to be dynamically 4658 // uniform if the target can skip. 4659 LLVM_DEBUG( 4660 dbgs() << "LV: Not inserting runtime ptr check for divergent target"); 4661 4662 ORE->emit( 4663 createMissedAnalysis("CantVersionLoopWithDivergentTarget") 4664 << "runtime pointer checks needed. Not enabled for divergent target"); 4665 4666 return None; 4667 } 4668 4669 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4670 if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize. 4671 return computeFeasibleMaxVF(OptForSize, TC); 4672 4673 if (Legal->getRuntimePointerChecking()->Need) { 4674 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4675 << "runtime pointer checks needed. 
Enable vectorization of this " 4676 "loop with '#pragma clang loop vectorize(enable)' when " 4677 "compiling with -Os/-Oz"); 4678 LLVM_DEBUG( 4679 dbgs() 4680 << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n"); 4681 return None; 4682 } 4683 4684 if (!PSE.getUnionPredicate().getPredicates().empty()) { 4685 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4686 << "runtime SCEV checks needed. Enable vectorization of this " 4687 "loop with '#pragma clang loop vectorize(enable)' when " 4688 "compiling with -Os/-Oz"); 4689 LLVM_DEBUG( 4690 dbgs() 4691 << "LV: Aborting. Runtime SCEV check is required with -Os/-Oz.\n"); 4692 return None; 4693 } 4694 4695 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4696 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4697 ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize") 4698 << "runtime stride == 1 checks needed. Enable vectorization of " 4699 "this loop with '#pragma clang loop vectorize(enable)' when " 4700 "compiling with -Os/-Oz"); 4701 LLVM_DEBUG( 4702 dbgs() 4703 << "LV: Aborting. Runtime stride check is required with -Os/-Oz.\n"); 4704 return None; 4705 } 4706 4707 // If we optimize the program for size, avoid creating the tail loop. 4708 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4709 4710 if (TC == 1) { 4711 ORE->emit(createMissedAnalysis("SingleIterationLoop") 4712 << "loop trip count is one, irrelevant for vectorization"); 4713 LLVM_DEBUG(dbgs() << "LV: Aborting, single iteration (non) loop.\n"); 4714 return None; 4715 } 4716 4717 // Record that scalar epilogue is not allowed. 4718 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 4719 4720 IsScalarEpilogueAllowed = !OptForSize; 4721 4722 // We don't create an epilogue when optimizing for size. 4723 // Invalidate interleave groups that require an epilogue if we can't mask 4724 // the interleave-group. 4725 if (!useMaskedInterleavedAccesses(TTI)) 4726 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 4727 4728 unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC); 4729 4730 if (TC > 0 && TC % MaxVF == 0) { 4731 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 4732 return MaxVF; 4733 } 4734 4735 // If we don't know the precise trip count, or if the trip count that we 4736 // found modulo the vectorization factor is not zero, try to fold the tail 4737 // by masking. 4738 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 4739 if (Legal->canFoldTailByMasking()) { 4740 FoldTailByMasking = true; 4741 return MaxVF; 4742 } 4743 4744 if (TC == 0) { 4745 ORE->emit( 4746 createMissedAnalysis("UnknownLoopCountComplexCFG") 4747 << "unable to calculate the loop count due to complex control flow"); 4748 return None; 4749 } 4750 4751 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 4752 << "cannot optimize for size and vectorize at the same time. " 4753 "Enable vectorization of this loop with '#pragma clang loop " 4754 "vectorize(enable)' when compiling with -Os/-Oz"); 4755 return None; 4756 } 4757 4758 unsigned 4759 LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize, 4760 unsigned ConstTripCount) { 4761 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4762 unsigned SmallestType, WidestType; 4763 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4764 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 4765 4766 // Get the maximum safe dependence distance in bits computed by LAA. 
4767 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4768 // the memory accesses that is most restrictive (involved in the smallest 4769 // dependence distance). 4770 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 4771 4772 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 4773 4774 unsigned MaxVectorSize = WidestRegister / WidestType; 4775 4776 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4777 << " / " << WidestType << " bits.\n"); 4778 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 4779 << WidestRegister << " bits.\n"); 4780 4781 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 4782 " into one vector!"); 4783 if (MaxVectorSize == 0) { 4784 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 4785 MaxVectorSize = 1; 4786 return MaxVectorSize; 4787 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 4788 isPowerOf2_32(ConstTripCount)) { 4789 // We need to clamp the VF to be the ConstTripCount. There is no point in 4790 // choosing a higher viable VF as done in the loop below. 4791 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 4792 << ConstTripCount << "\n"); 4793 MaxVectorSize = ConstTripCount; 4794 return MaxVectorSize; 4795 } 4796 4797 unsigned MaxVF = MaxVectorSize; 4798 if (TTI.shouldMaximizeVectorBandwidth(OptForSize) || 4799 (MaximizeBandwidth && !OptForSize)) { 4800 // Collect all viable vectorization factors larger than the default MaxVF 4801 // (i.e. MaxVectorSize). 4802 SmallVector<unsigned, 8> VFs; 4803 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 4804 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 4805 VFs.push_back(VS); 4806 4807 // For each VF calculate its register usage. 4808 auto RUs = calculateRegisterUsage(VFs); 4809 4810 // Select the largest VF which doesn't require more registers than existing 4811 // ones. 4812 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 4813 for (int i = RUs.size() - 1; i >= 0; --i) { 4814 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 4815 MaxVF = VFs[i]; 4816 break; 4817 } 4818 } 4819 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 4820 if (MaxVF < MinVF) { 4821 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 4822 << ") with target's minimum: " << MinVF << '\n'); 4823 MaxVF = MinVF; 4824 } 4825 } 4826 } 4827 return MaxVF; 4828 } 4829 4830 VectorizationFactor 4831 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 4832 float Cost = expectedCost(1).first; 4833 const float ScalarCost = Cost; 4834 unsigned Width = 1; 4835 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 4836 4837 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 4838 if (ForceVectorization && MaxVF > 1) { 4839 // Ignore scalar width, because the user explicitly wants vectorization. 4840 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 4841 // evaluation. 4842 Cost = std::numeric_limits<float>::max(); 4843 } 4844 4845 for (unsigned i = 2; i <= MaxVF; i *= 2) { 4846 // Notice that the vector loop needs to be executed less times, so 4847 // we need to divide the cost of the vector loops by the width of 4848 // the vector elements. 
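    // For example (illustrative numbers): if the scalar body costs 8 and the
    // VF = 4 body costs 20, the per-lane cost is 20 / 4 = 5 < 8, so VF = 4 is
    // chosen here unless a narrower vector VF is already as cheap or cheaper
    // per lane.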
4849 VectorizationCostTy C = expectedCost(i); 4850 float VectorCost = C.first / (float)i; 4851 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 4852 << " costs: " << (int)VectorCost << ".\n"); 4853 if (!C.second && !ForceVectorization) { 4854 LLVM_DEBUG( 4855 dbgs() << "LV: Not considering vector loop of width " << i 4856 << " because it will not generate any vector instructions.\n"); 4857 continue; 4858 } 4859 if (VectorCost < Cost) { 4860 Cost = VectorCost; 4861 Width = i; 4862 } 4863 } 4864 4865 if (!EnableCondStoresVectorization && NumPredStores) { 4866 ORE->emit(createMissedAnalysis("ConditionalStore") 4867 << "store that is conditionally executed prevents vectorization"); 4868 LLVM_DEBUG( 4869 dbgs() << "LV: No vectorization. There are conditional stores.\n"); 4870 Width = 1; 4871 Cost = ScalarCost; 4872 } 4873 4874 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 4875 << "LV: Vectorization seems to be not beneficial, " 4876 << "but was forced by a user.\n"); 4877 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 4878 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 4879 return Factor; 4880 } 4881 4882 std::pair<unsigned, unsigned> 4883 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 4884 unsigned MinWidth = -1U; 4885 unsigned MaxWidth = 8; 4886 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 4887 4888 // For each block. 4889 for (BasicBlock *BB : TheLoop->blocks()) { 4890 // For each instruction in the loop. 4891 for (Instruction &I : BB->instructionsWithoutDebug()) { 4892 Type *T = I.getType(); 4893 4894 // Skip ignored values. 4895 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end()) 4896 continue; 4897 4898 // Only examine Loads, Stores and PHINodes. 4899 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 4900 continue; 4901 4902 // Examine PHI nodes that are reduction variables. Update the type to 4903 // account for the recurrence type. 4904 if (auto *PN = dyn_cast<PHINode>(&I)) { 4905 if (!Legal->isReductionVariable(PN)) 4906 continue; 4907 RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN]; 4908 T = RdxDesc.getRecurrenceType(); 4909 } 4910 4911 // Examine the stored values. 4912 if (auto *ST = dyn_cast<StoreInst>(&I)) 4913 T = ST->getValueOperand()->getType(); 4914 4915 // Ignore loaded pointer types and stored pointer types that are not 4916 // vectorizable. 4917 // 4918 // FIXME: The check here attempts to predict whether a load or store will 4919 // be vectorized. We only know this for certain after a VF has 4920 // been selected. Here, we assume that if an access can be 4921 // vectorized, it will be. We should also look at extending this 4922 // optimization to non-pointer types. 4923 // 4924 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 4925 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 4926 continue; 4927 4928 MinWidth = std::min(MinWidth, 4929 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4930 MaxWidth = std::max(MaxWidth, 4931 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 4932 } 4933 } 4934 4935 return {MinWidth, MaxWidth}; 4936 } 4937 4938 unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize, 4939 unsigned VF, 4940 unsigned LoopCost) { 4941 // -- The interleave heuristics -- 4942 // We interleave the loop in order to expose ILP and reduce the loop overhead. 4943 // There are many micro-architectural considerations that we can't predict 4944 // at this level. 
For example, frontend pressure (on decode or fetch) due to 4945 // code size, or the number and capabilities of the execution ports. 4946 // 4947 // We use the following heuristics to select the interleave count: 4948 // 1. If the code has reductions, then we interleave to break the cross 4949 // iteration dependency. 4950 // 2. If the loop is really small, then we interleave to reduce the loop 4951 // overhead. 4952 // 3. We don't interleave if we think that we will spill registers to memory 4953 // due to the increased register pressure. 4954 4955 // When we optimize for size, we don't interleave. 4956 if (OptForSize) 4957 return 1; 4958 4959 // We used the distance for the interleave count. 4960 if (Legal->getMaxSafeDepDistBytes() != -1U) 4961 return 1; 4962 4963 // Do not interleave loops with a relatively small trip count. 4964 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4965 if (TC > 1 && TC < TinyTripCountInterleaveThreshold) 4966 return 1; 4967 4968 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1); 4969 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 4970 << " registers\n"); 4971 4972 if (VF == 1) { 4973 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 4974 TargetNumRegisters = ForceTargetNumScalarRegs; 4975 } else { 4976 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 4977 TargetNumRegisters = ForceTargetNumVectorRegs; 4978 } 4979 4980 RegisterUsage R = calculateRegisterUsage({VF})[0]; 4981 // We divide by these constants so assume that we have at least one 4982 // instruction that uses at least one register. 4983 R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U); 4984 4985 // We calculate the interleave count using the following formula. 4986 // Subtract the number of loop invariants from the number of available 4987 // registers. These registers are used by all of the interleaved instances. 4988 // Next, divide the remaining registers by the number of registers that is 4989 // required by the loop, in order to estimate how many parallel instances 4990 // fit without causing spills. All of this is rounded down if necessary to be 4991 // a power of two. We want power of two interleave count to simplify any 4992 // addressing operations or alignment considerations. 4993 // We also want power of two interleave counts to ensure that the induction 4994 // variable of the vector loop wraps to zero, when tail is folded by masking; 4995 // this currently happens when OptForSize, in which case IC is set to 1 above. 4996 unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) / 4997 R.MaxLocalUsers); 4998 4999 // Don't count the induction variable as interleaved. 5000 if (EnableIndVarRegisterHeur) 5001 IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) / 5002 std::max(1U, (R.MaxLocalUsers - 1))); 5003 5004 // Clamp the interleave ranges to reasonable counts. 5005 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5006 5007 // Check if the user has overridden the max. 5008 if (VF == 1) { 5009 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5010 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5011 } else { 5012 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5013 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5014 } 5015 5016 // If we did not calculate the cost for VF (because the user selected the VF) 5017 // then we calculate the cost of VF here. 
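  // For example (illustrative numbers): with 16 target registers, 2
  // loop-invariant values, and MaxLocalUsers = 3, the formula above gives
  // IC = PowerOf2Floor((16 - 2) / 3) = 4; with the induction-variable
  // heuristic it gives PowerOf2Floor((16 - 2 - 1) / 2) = 4. The result is
  // clamped below to the target's maximum interleave factor.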
5018 if (LoopCost == 0) 5019 LoopCost = expectedCost(VF).first; 5020 5021 // Clamp the calculated IC to be between the 1 and the max interleave count 5022 // that the target allows. 5023 if (IC > MaxInterleaveCount) 5024 IC = MaxInterleaveCount; 5025 else if (IC < 1) 5026 IC = 1; 5027 5028 // Interleave if we vectorized this loop and there is a reduction that could 5029 // benefit from interleaving. 5030 if (VF > 1 && !Legal->getReductionVars()->empty()) { 5031 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5032 return IC; 5033 } 5034 5035 // Note that if we've already vectorized the loop we will have done the 5036 // runtime check and so interleaving won't require further checks. 5037 bool InterleavingRequiresRuntimePointerCheck = 5038 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5039 5040 // We want to interleave small loops in order to reduce the loop overhead and 5041 // potentially expose ILP opportunities. 5042 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5043 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5044 // We assume that the cost overhead is 1 and we use the cost model 5045 // to estimate the cost of the loop and interleave until the cost of the 5046 // loop overhead is about 5% of the cost of the loop. 5047 unsigned SmallIC = 5048 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5049 5050 // Interleave until store/load ports (estimated by max interleave count) are 5051 // saturated. 5052 unsigned NumStores = Legal->getNumStores(); 5053 unsigned NumLoads = Legal->getNumLoads(); 5054 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5055 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5056 5057 // If we have a scalar reduction (vector reductions are already dealt with 5058 // by this point), we can increase the critical path length if the loop 5059 // we're interleaving is inside another loop. Limit, by default to 2, so the 5060 // critical path only gets increased by one reduction operation. 5061 if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) { 5062 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5063 SmallIC = std::min(SmallIC, F); 5064 StoresIC = std::min(StoresIC, F); 5065 LoadsIC = std::min(LoadsIC, F); 5066 } 5067 5068 if (EnableLoadStoreRuntimeInterleave && 5069 std::max(StoresIC, LoadsIC) > SmallIC) { 5070 LLVM_DEBUG( 5071 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5072 return std::max(StoresIC, LoadsIC); 5073 } 5074 5075 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5076 return SmallIC; 5077 } 5078 5079 // Interleave if this is a large loop (small loops are already dealt with by 5080 // this point) that could benefit from interleaving. 5081 bool HasReductions = !Legal->getReductionVars()->empty(); 5082 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5083 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5084 return IC; 5085 } 5086 5087 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5088 return 1; 5089 } 5090 5091 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5092 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5093 // This function calculates the register usage by measuring the highest number 5094 // of values that are alive at a single location. Obviously, this is a very 5095 // rough estimation. We scan the loop in a topological order in order and 5096 // assign a number to each instruction. 
We use RPO to ensure that defs are 5097 // met before their users. We assume that each instruction that has in-loop 5098 // users starts an interval. We record every time that an in-loop value is 5099 // used, so we have a list of the first and last occurrences of each 5100 // instruction. Next, we transpose this data structure into a multi map that 5101 // holds the list of intervals that *end* at a specific location. This multi 5102 // map allows us to perform a linear search. We scan the instructions linearly 5103 // and record each time that a new interval starts, by placing it in a set. 5104 // If we find this value in the multi-map then we remove it from the set. 5105 // The max register usage is the maximum size of the set. 5106 // We also search for instructions that are defined outside the loop, but are 5107 // used inside the loop. We need this number separately from the max-interval 5108 // usage number because when we unroll, loop-invariant values do not take 5109 // more register. 5110 LoopBlocksDFS DFS(TheLoop); 5111 DFS.perform(LI); 5112 5113 RegisterUsage RU; 5114 5115 // Each 'key' in the map opens a new interval. The values 5116 // of the map are the index of the 'last seen' usage of the 5117 // instruction that is the key. 5118 using IntervalMap = DenseMap<Instruction *, unsigned>; 5119 5120 // Maps instruction to its index. 5121 SmallVector<Instruction *, 64> IdxToInstr; 5122 // Marks the end of each interval. 5123 IntervalMap EndPoint; 5124 // Saves the list of instruction indices that are used in the loop. 5125 SmallPtrSet<Instruction *, 8> Ends; 5126 // Saves the list of values that are used in the loop but are 5127 // defined outside the loop, such as arguments and constants. 5128 SmallPtrSet<Value *, 8> LoopInvariants; 5129 5130 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5131 for (Instruction &I : BB->instructionsWithoutDebug()) { 5132 IdxToInstr.push_back(&I); 5133 5134 // Save the end location of each USE. 5135 for (Value *U : I.operands()) { 5136 auto *Instr = dyn_cast<Instruction>(U); 5137 5138 // Ignore non-instruction values such as arguments, constants, etc. 5139 if (!Instr) 5140 continue; 5141 5142 // If this instruction is outside the loop then record it and continue. 5143 if (!TheLoop->contains(Instr)) { 5144 LoopInvariants.insert(Instr); 5145 continue; 5146 } 5147 5148 // Overwrite previous end points. 5149 EndPoint[Instr] = IdxToInstr.size(); 5150 Ends.insert(Instr); 5151 } 5152 } 5153 } 5154 5155 // Saves the list of intervals that end with the index in 'key'. 5156 using InstrList = SmallVector<Instruction *, 2>; 5157 DenseMap<unsigned, InstrList> TransposeEnds; 5158 5159 // Transpose the EndPoints to a list of values that end at each index. 5160 for (auto &Interval : EndPoint) 5161 TransposeEnds[Interval.second].push_back(Interval.first); 5162 5163 SmallPtrSet<Instruction *, 8> OpenIntervals; 5164 5165 // Get the size of the widest register. 5166 unsigned MaxSafeDepDist = -1U; 5167 if (Legal->getMaxSafeDepDistBytes() != -1U) 5168 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5169 unsigned WidestRegister = 5170 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5171 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5172 5173 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5174 SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0); 5175 5176 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5177 5178 // A lambda that gets the register usage for the given type and VF. 
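  // For example (illustrative): with a 256-bit widest register, an i32 value
  // at VF = 8 occupies max(1, 8 * 32 / 256) = 1 register and VF = 16 occupies
  // 2, while token-typed values are counted as free.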
5179 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5180 if (Ty->isTokenTy()) 5181 return 0U; 5182 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5183 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5184 }; 5185 5186 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5187 Instruction *I = IdxToInstr[i]; 5188 5189 // Remove all of the instructions that end at this location. 5190 InstrList &List = TransposeEnds[i]; 5191 for (Instruction *ToRemove : List) 5192 OpenIntervals.erase(ToRemove); 5193 5194 // Ignore instructions that are never used within the loop. 5195 if (Ends.find(I) == Ends.end()) 5196 continue; 5197 5198 // Skip ignored values. 5199 if (ValuesToIgnore.find(I) != ValuesToIgnore.end()) 5200 continue; 5201 5202 // For each VF find the maximum usage of registers. 5203 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5204 if (VFs[j] == 1) { 5205 MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size()); 5206 continue; 5207 } 5208 collectUniformsAndScalars(VFs[j]); 5209 // Count the number of live intervals. 5210 unsigned RegUsage = 0; 5211 for (auto Inst : OpenIntervals) { 5212 // Skip ignored values for VF > 1. 5213 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end() || 5214 isScalarAfterVectorization(Inst, VFs[j])) 5215 continue; 5216 RegUsage += GetRegUsage(Inst->getType(), VFs[j]); 5217 } 5218 MaxUsages[j] = std::max(MaxUsages[j], RegUsage); 5219 } 5220 5221 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5222 << OpenIntervals.size() << '\n'); 5223 5224 // Add the current instruction to the list of open intervals. 5225 OpenIntervals.insert(I); 5226 } 5227 5228 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5229 unsigned Invariant = 0; 5230 if (VFs[i] == 1) 5231 Invariant = LoopInvariants.size(); 5232 else { 5233 for (auto Inst : LoopInvariants) 5234 Invariant += GetRegUsage(Inst->getType(), VFs[i]); 5235 } 5236 5237 LLVM_DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n'); 5238 LLVM_DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n'); 5239 LLVM_DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant 5240 << '\n'); 5241 5242 RU.LoopInvariantRegs = Invariant; 5243 RU.MaxLocalUsers = MaxUsages[i]; 5244 RUs[i] = RU; 5245 } 5246 5247 return RUs; 5248 } 5249 5250 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5251 // TODO: Cost model for emulated masked load/store is completely 5252 // broken. This hack guides the cost model to use an artificially 5253 // high enough value to practically disable vectorization with such 5254 // operations, except where previously deployed legality hack allowed 5255 // using very low cost values. This is to avoid regressions coming simply 5256 // from moving "masked load/store" check from legality to cost model. 5257 // Masked Load/Gather emulation was previously never allowed. 5258 // Limited number of Masked Store/Scatter emulation was allowed. 5259 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5260 return isa<LoadInst>(I) || 5261 (isa<StoreInst>(I) && 5262 NumPredStores > NumberOfStoresToPredicate); 5263 } 5264 5265 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5266 // If we aren't vectorizing the loop, or if we've already collected the 5267 // instructions to scalarize, there's nothing to do. Collection may already 5268 // have occurred if we have a user-selected VF and are now computing the 5269 // expected cost for interleaving. 
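  // For example (illustrative): a div whose divisor may be zero inside an
  // if-converted block is scalar-with-predication; computePredInstDiscount
  // below compares the cost of keeping it (and its single-use operand chain)
  // as predicated scalar code against the cost of the widened form, and the
  // scalar costs are only recorded when scalarization is not more expensive.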
  if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop and
  // determine whether it would be better not to if-convert the blocks they are
  // in. If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
        if (!useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
    unsigned VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  int Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know will
    // already be scalar to avoid traversing chains that are unlikely to be
    // beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
    //
    // We assume we will only emit a value for lane zero of an instruction
    // marked uniform after vectorization, rather than VF identical values.
    // Thus, if we scalarize an instruction that uses a uniform, we would
    // create uses of values corresponding to the lanes we aren't emitting code
    // for. This behavior can be changed by allowing getScalarValue to clone
    // the lane zero values for uniforms rather than asserting.
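    // For example (illustrative): if %base is uniform after vectorization,
    // only its lane-zero value is emitted, so an instruction using %base
    // cannot be replicated for every lane here and is treated as not
    // scalarizable.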
5339 for (Use &U : I->operands()) 5340 if (auto *J = dyn_cast<Instruction>(U.get())) 5341 if (isUniformAfterVectorization(J, VF)) 5342 return false; 5343 5344 // Otherwise, we can scalarize the instruction. 5345 return true; 5346 }; 5347 5348 // Returns true if an operand that cannot be scalarized must be extracted 5349 // from a vector. We will account for this scalarization overhead below. Note 5350 // that the non-void predicated instructions are placed in their own blocks, 5351 // and their return values are inserted into vectors. Thus, an extract would 5352 // still be required. 5353 auto needsExtract = [&](Instruction *I) -> bool { 5354 return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF); 5355 }; 5356 5357 // Compute the expected cost discount from scalarizing the entire expression 5358 // feeding the predicated instruction. We currently only consider expressions 5359 // that are single-use instruction chains. 5360 Worklist.push_back(PredInst); 5361 while (!Worklist.empty()) { 5362 Instruction *I = Worklist.pop_back_val(); 5363 5364 // If we've already analyzed the instruction, there's nothing to do. 5365 if (ScalarCosts.find(I) != ScalarCosts.end()) 5366 continue; 5367 5368 // Compute the cost of the vector instruction. Note that this cost already 5369 // includes the scalarization overhead of the predicated instruction. 5370 unsigned VectorCost = getInstructionCost(I, VF).first; 5371 5372 // Compute the cost of the scalarized instruction. This cost is the cost of 5373 // the instruction as if it wasn't if-converted and instead remained in the 5374 // predicated block. We will scale this cost by block probability after 5375 // computing the scalarization overhead. 5376 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5377 5378 // Compute the scalarization overhead of needed insertelement instructions 5379 // and phi nodes. 5380 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5381 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5382 true, false); 5383 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5384 } 5385 5386 // Compute the scalarization overhead of needed extractelement 5387 // instructions. For each of the instruction's operands, if the operand can 5388 // be scalarized, add it to the worklist; otherwise, account for the 5389 // overhead. 5390 for (Use &U : I->operands()) 5391 if (auto *J = dyn_cast<Instruction>(U.get())) { 5392 assert(VectorType::isValidElementType(J->getType()) && 5393 "Instruction has non-scalar type"); 5394 if (canBeScalarized(J)) 5395 Worklist.push_back(J); 5396 else if (needsExtract(J)) 5397 ScalarCost += TTI.getScalarizationOverhead( 5398 ToVectorTy(J->getType(),VF), false, true); 5399 } 5400 5401 // Scale the total scalar cost by block probability. 5402 ScalarCost /= getReciprocalPredBlockProb(); 5403 5404 // Compute the discount. A non-negative discount means the vector version 5405 // of the instruction costs more, and scalarizing would be beneficial. 5406 Discount += VectorCost - ScalarCost; 5407 ScalarCosts[I] = ScalarCost; 5408 } 5409 5410 return Discount; 5411 } 5412 5413 LoopVectorizationCostModel::VectorizationCostTy 5414 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5415 VectorizationCostTy Cost; 5416 5417 // For each block. 5418 for (BasicBlock *BB : TheLoop->blocks()) { 5419 VectorizationCostTy BlockCost; 5420 5421 // For each instruction in the old loop. 5422 for (Instruction &I : BB->instructionsWithoutDebug()) { 5423 // Skip ignored values. 
5424 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5425 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5426 continue; 5427 5428 VectorizationCostTy C = getInstructionCost(&I, VF); 5429 5430 // Check if we should override the cost. 5431 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5432 C.first = ForceTargetInstructionCost; 5433 5434 BlockCost.first += C.first; 5435 BlockCost.second |= C.second; 5436 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5437 << " for VF " << VF << " For instruction: " << I 5438 << '\n'); 5439 } 5440 5441 // If we are vectorizing a predicated block, it will have been 5442 // if-converted. This means that the block's instructions (aside from 5443 // stores and instructions that may divide by zero) will now be 5444 // unconditionally executed. For the scalar case, we may not always execute 5445 // the predicated block. Thus, scale the block's cost by the probability of 5446 // executing it. 5447 if (VF == 1 && blockNeedsPredication(BB)) 5448 BlockCost.first /= getReciprocalPredBlockProb(); 5449 5450 Cost.first += BlockCost.first; 5451 Cost.second |= BlockCost.second; 5452 } 5453 5454 return Cost; 5455 } 5456 5457 /// Gets Address Access SCEV after verifying that the access pattern 5458 /// is loop invariant except the induction variable dependence. 5459 /// 5460 /// This SCEV can be sent to the Target in order to estimate the address 5461 /// calculation cost. 5462 static const SCEV *getAddressAccessSCEV( 5463 Value *Ptr, 5464 LoopVectorizationLegality *Legal, 5465 PredicatedScalarEvolution &PSE, 5466 const Loop *TheLoop) { 5467 5468 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5469 if (!Gep) 5470 return nullptr; 5471 5472 // We are looking for a gep with all loop invariant indices except for one 5473 // which should be an induction variable. 5474 auto SE = PSE.getSE(); 5475 unsigned NumOperands = Gep->getNumOperands(); 5476 for (unsigned i = 1; i < NumOperands; ++i) { 5477 Value *Opd = Gep->getOperand(i); 5478 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5479 !Legal->isInductionVariable(Opd)) 5480 return nullptr; 5481 } 5482 5483 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5484 return PSE.getSCEV(Ptr); 5485 } 5486 5487 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5488 return Legal->hasStride(I->getOperand(0)) || 5489 Legal->hasStride(I->getOperand(1)); 5490 } 5491 5492 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5493 unsigned VF) { 5494 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5495 Type *ValTy = getMemInstValueType(I); 5496 auto SE = PSE.getSE(); 5497 5498 unsigned Alignment = getLoadStoreAlignment(I); 5499 unsigned AS = getLoadStoreAddressSpace(I); 5500 Value *Ptr = getLoadStorePointerOperand(I); 5501 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5502 5503 // Figure out whether the access is strided and get the stride value 5504 // if it's known in compile time 5505 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5506 5507 // Get the cost of the scalar memory instruction and address computation. 5508 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5509 5510 // Don't pass *I here, since it is scalar but will actually be part of a 5511 // vectorized loop where the user of it is a vectorized instruction. 
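    // Illustration with hypothetical per-element costs: for VF = 4, an address
    // computation cost of 1 and a scalar memory-op cost of 2, the address term
    // added above and the memory-op term added just below give 4 * (1 + 2) = 12;
    // the insert/extract scalarization overhead is then added, and if the access
    // is predicated the total is divided by the reciprocal block probability
    // (e.g. 2 when predicated blocks are assumed to execute about half the time).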
5512 Cost += VF * 5513 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 5514 AS); 5515 5516 // Get the overhead of the extractelement and insertelement instructions 5517 // we might create due to scalarization. 5518 Cost += getScalarizationOverhead(I, VF, TTI); 5519 5520 // If we have a predicated store, it may not be executed for each vector 5521 // lane. Scale the cost by the probability of executing the predicated 5522 // block. 5523 if (isPredicatedInst(I)) { 5524 Cost /= getReciprocalPredBlockProb(); 5525 5526 if (useEmulatedMaskMemRefHack(I)) 5527 // Artificially setting to a high enough value to practically disable 5528 // vectorization with such operations. 5529 Cost = 3000000; 5530 } 5531 5532 return Cost; 5533 } 5534 5535 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5536 unsigned VF) { 5537 Type *ValTy = getMemInstValueType(I); 5538 Type *VectorTy = ToVectorTy(ValTy, VF); 5539 unsigned Alignment = getLoadStoreAlignment(I); 5540 Value *Ptr = getLoadStorePointerOperand(I); 5541 unsigned AS = getLoadStoreAddressSpace(I); 5542 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5543 5544 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5545 "Stride should be 1 or -1 for consecutive memory access"); 5546 unsigned Cost = 0; 5547 if (Legal->isMaskRequired(I)) 5548 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 5549 else 5550 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5551 5552 bool Reverse = ConsecutiveStride < 0; 5553 if (Reverse) 5554 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5555 return Cost; 5556 } 5557 5558 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5559 unsigned VF) { 5560 Type *ValTy = getMemInstValueType(I); 5561 Type *VectorTy = ToVectorTy(ValTy, VF); 5562 unsigned Alignment = getLoadStoreAlignment(I); 5563 unsigned AS = getLoadStoreAddressSpace(I); 5564 if (isa<LoadInst>(I)) { 5565 return TTI.getAddressComputationCost(ValTy) + 5566 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5567 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5568 } 5569 StoreInst *SI = cast<StoreInst>(I); 5570 5571 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5572 return TTI.getAddressComputationCost(ValTy) + 5573 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) + 5574 (isLoopInvariantStoreValue ? 
0 : TTI.getVectorInstrCost( 5575 Instruction::ExtractElement, 5576 VectorTy, VF - 1)); 5577 } 5578 5579 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5580 unsigned VF) { 5581 Type *ValTy = getMemInstValueType(I); 5582 Type *VectorTy = ToVectorTy(ValTy, VF); 5583 unsigned Alignment = getLoadStoreAlignment(I); 5584 Value *Ptr = getLoadStorePointerOperand(I); 5585 5586 return TTI.getAddressComputationCost(VectorTy) + 5587 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5588 Legal->isMaskRequired(I), Alignment); 5589 } 5590 5591 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5592 unsigned VF) { 5593 Type *ValTy = getMemInstValueType(I); 5594 Type *VectorTy = ToVectorTy(ValTy, VF); 5595 unsigned AS = getLoadStoreAddressSpace(I); 5596 5597 auto Group = getInterleavedAccessGroup(I); 5598 assert(Group && "Fail to get an interleaved access group."); 5599 5600 unsigned InterleaveFactor = Group->getFactor(); 5601 Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5602 5603 // Holds the indices of existing members in an interleaved load group. 5604 // An interleaved store group doesn't need this as it doesn't allow gaps. 5605 SmallVector<unsigned, 4> Indices; 5606 if (isa<LoadInst>(I)) { 5607 for (unsigned i = 0; i < InterleaveFactor; i++) 5608 if (Group->getMember(i)) 5609 Indices.push_back(i); 5610 } 5611 5612 // Calculate the cost of the whole interleaved group. 5613 bool UseMaskForGaps = 5614 Group->requiresScalarEpilogue() && !IsScalarEpilogueAllowed; 5615 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5616 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5617 Group->getAlignment(), AS, Legal->isMaskRequired(I), UseMaskForGaps); 5618 5619 if (Group->isReverse()) { 5620 // TODO: Add support for reversed masked interleaved access. 5621 assert(!Legal->isMaskRequired(I) && 5622 "Reverse masked interleaved access not supported."); 5623 Cost += Group->getNumMembers() * 5624 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5625 } 5626 return Cost; 5627 } 5628 5629 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5630 unsigned VF) { 5631 // Calculate scalar cost only. Vectorization cost should be ready at this 5632 // moment. 5633 if (VF == 1) { 5634 Type *ValTy = getMemInstValueType(I); 5635 unsigned Alignment = getLoadStoreAlignment(I); 5636 unsigned AS = getLoadStoreAddressSpace(I); 5637 5638 return TTI.getAddressComputationCost(ValTy) + 5639 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5640 } 5641 return getWideningCost(I, VF); 5642 } 5643 5644 LoopVectorizationCostModel::VectorizationCostTy 5645 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5646 // If we know that this instruction will remain uniform, check the cost of 5647 // the scalar version. 5648 if (isUniformAfterVectorization(I, VF)) 5649 VF = 1; 5650 5651 if (VF > 1 && isProfitableToScalarize(I, VF)) 5652 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5653 5654 // Forced scalars do not have any scalarization overhead. 
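  // Such forced scalars are costed as VF independent copies of the scalar
  // instruction; e.g. (hypothetically) a forced-scalar instruction with scalar
  // cost 1 at VF = 4 is charged 4, with no insert/extract overhead added.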
5655 auto ForcedScalar = ForcedScalars.find(VF); 5656 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5657 auto InstSet = ForcedScalar->second; 5658 if (InstSet.find(I) != InstSet.end()) 5659 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5660 } 5661 5662 Type *VectorTy; 5663 unsigned C = getInstructionCost(I, VF, VectorTy); 5664 5665 bool TypeNotScalarized = 5666 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5667 return VectorizationCostTy(C, TypeNotScalarized); 5668 } 5669 5670 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 5671 if (VF == 1) 5672 return; 5673 NumPredStores = 0; 5674 for (BasicBlock *BB : TheLoop->blocks()) { 5675 // For each instruction in the old loop. 5676 for (Instruction &I : *BB) { 5677 Value *Ptr = getLoadStorePointerOperand(&I); 5678 if (!Ptr) 5679 continue; 5680 5681 // TODO: We should generate better code and update the cost model for 5682 // predicated uniform stores. Today they are treated as any other 5683 // predicated store (see added test cases in 5684 // invariant-store-vectorization.ll). 5685 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 5686 NumPredStores++; 5687 5688 if (Legal->isUniform(Ptr) && 5689 // Conditional loads and stores should be scalarized and predicated. 5690 // isScalarWithPredication cannot be used here since masked 5691 // gather/scatters are not considered scalar with predication. 5692 !Legal->blockNeedsPredication(I.getParent())) { 5693 // TODO: Avoid replicating loads and stores instead of 5694 // relying on instcombine to remove them. 5695 // Load: Scalar load + broadcast 5696 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 5697 unsigned Cost = getUniformMemOpCost(&I, VF); 5698 setWideningDecision(&I, VF, CM_Scalarize, Cost); 5699 continue; 5700 } 5701 5702 // We assume that widening is the best solution when possible. 5703 if (memoryInstructionCanBeWidened(&I, VF)) { 5704 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 5705 int ConsecutiveStride = 5706 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 5707 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5708 "Expected consecutive stride."); 5709 InstWidening Decision = 5710 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 5711 setWideningDecision(&I, VF, Decision, Cost); 5712 continue; 5713 } 5714 5715 // Choose between Interleaving, Gather/Scatter or Scalarization. 5716 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 5717 unsigned NumAccesses = 1; 5718 if (isAccessInterleaved(&I)) { 5719 auto Group = getInterleavedAccessGroup(&I); 5720 assert(Group && "Fail to get an interleaved access group."); 5721 5722 // Make one decision for the whole group. 5723 if (getWideningDecision(&I, VF) != CM_Unknown) 5724 continue; 5725 5726 NumAccesses = Group->getNumMembers(); 5727 if (interleavedAccessCanBeWidened(&I, VF)) 5728 InterleaveCost = getInterleaveGroupCost(&I, VF); 5729 } 5730 5731 unsigned GatherScatterCost = 5732 isLegalGatherOrScatter(&I) 5733 ? getGatherScatterCost(&I, VF) * NumAccesses 5734 : std::numeric_limits<unsigned>::max(); 5735 5736 unsigned ScalarizationCost = 5737 getMemInstScalarizationCost(&I, VF) * NumAccesses; 5738 5739 // Choose better solution for the current VF, 5740 // write down this decision and use it during vectorization. 
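      // For example (purely illustrative costs): with InterleaveCost = 8,
      // GatherScatterCost = 12 and ScalarizationCost = 20, interleaving wins
      // and the group gets CM_Interleave with cost 8; were interleaving not
      // possible, the gather/scatter cost of 12 would then be compared against
      // scalarization.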
5741 unsigned Cost; 5742 InstWidening Decision; 5743 if (InterleaveCost <= GatherScatterCost && 5744 InterleaveCost < ScalarizationCost) { 5745 Decision = CM_Interleave; 5746 Cost = InterleaveCost; 5747 } else if (GatherScatterCost < ScalarizationCost) { 5748 Decision = CM_GatherScatter; 5749 Cost = GatherScatterCost; 5750 } else { 5751 Decision = CM_Scalarize; 5752 Cost = ScalarizationCost; 5753 } 5754 // If the instructions belongs to an interleave group, the whole group 5755 // receives the same decision. The whole group receives the cost, but 5756 // the cost will actually be assigned to one instruction. 5757 if (auto Group = getInterleavedAccessGroup(&I)) 5758 setWideningDecision(Group, VF, Decision, Cost); 5759 else 5760 setWideningDecision(&I, VF, Decision, Cost); 5761 } 5762 } 5763 5764 // Make sure that any load of address and any other address computation 5765 // remains scalar unless there is gather/scatter support. This avoids 5766 // inevitable extracts into address registers, and also has the benefit of 5767 // activating LSR more, since that pass can't optimize vectorized 5768 // addresses. 5769 if (TTI.prefersVectorizedAddressing()) 5770 return; 5771 5772 // Start with all scalar pointer uses. 5773 SmallPtrSet<Instruction *, 8> AddrDefs; 5774 for (BasicBlock *BB : TheLoop->blocks()) 5775 for (Instruction &I : *BB) { 5776 Instruction *PtrDef = 5777 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 5778 if (PtrDef && TheLoop->contains(PtrDef) && 5779 getWideningDecision(&I, VF) != CM_GatherScatter) 5780 AddrDefs.insert(PtrDef); 5781 } 5782 5783 // Add all instructions used to generate the addresses. 5784 SmallVector<Instruction *, 4> Worklist; 5785 for (auto *I : AddrDefs) 5786 Worklist.push_back(I); 5787 while (!Worklist.empty()) { 5788 Instruction *I = Worklist.pop_back_val(); 5789 for (auto &Op : I->operands()) 5790 if (auto *InstOp = dyn_cast<Instruction>(Op)) 5791 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 5792 AddrDefs.insert(InstOp).second) 5793 Worklist.push_back(InstOp); 5794 } 5795 5796 for (auto *I : AddrDefs) { 5797 if (isa<LoadInst>(I)) { 5798 // Setting the desired widening decision should ideally be handled in 5799 // by cost functions, but since this involves the task of finding out 5800 // if the loaded register is involved in an address computation, it is 5801 // instead changed here when we know this is the case. 5802 InstWidening Decision = getWideningDecision(I, VF); 5803 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 5804 // Scalarize a widened load of address. 5805 setWideningDecision(I, VF, CM_Scalarize, 5806 (VF * getMemoryInstructionCost(I, 1))); 5807 else if (auto Group = getInterleavedAccessGroup(I)) { 5808 // Scalarize an interleave group of address loads. 5809 for (unsigned I = 0; I < Group->getFactor(); ++I) { 5810 if (Instruction *Member = Group->getMember(I)) 5811 setWideningDecision(Member, VF, CM_Scalarize, 5812 (VF * getMemoryInstructionCost(Member, 1))); 5813 } 5814 } 5815 } else 5816 // Make sure I gets scalarized and a cost estimate without 5817 // scalarization overhead. 5818 ForcedScalars[VF].insert(I); 5819 } 5820 } 5821 5822 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 5823 unsigned VF, 5824 Type *&VectorTy) { 5825 Type *RetTy = I->getType(); 5826 if (canTruncateToMinimalBitwidth(I, VF)) 5827 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 5828 VectorTy = isScalarAfterVectorization(I, VF) ? 
RetTy : ToVectorTy(RetTy, VF); 5829 auto SE = PSE.getSE(); 5830 5831 // TODO: We need to estimate the cost of intrinsic calls. 5832 switch (I->getOpcode()) { 5833 case Instruction::GetElementPtr: 5834 // We mark this instruction as zero-cost because the cost of GEPs in 5835 // vectorized code depends on whether the corresponding memory instruction 5836 // is scalarized or not. Therefore, we handle GEPs with the memory 5837 // instruction cost. 5838 return 0; 5839 case Instruction::Br: { 5840 // In cases of scalarized and predicated instructions, there will be VF 5841 // predicated blocks in the vectorized loop. Each branch around these 5842 // blocks requires also an extract of its vector compare i1 element. 5843 bool ScalarPredicatedBB = false; 5844 BranchInst *BI = cast<BranchInst>(I); 5845 if (VF > 1 && BI->isConditional() && 5846 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 5847 PredicatedBBsAfterVectorization.end() || 5848 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 5849 PredicatedBBsAfterVectorization.end())) 5850 ScalarPredicatedBB = true; 5851 5852 if (ScalarPredicatedBB) { 5853 // Return cost for branches around scalarized and predicated blocks. 5854 Type *Vec_i1Ty = 5855 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 5856 return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) + 5857 (TTI.getCFInstrCost(Instruction::Br) * VF)); 5858 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 5859 // The back-edge branch will remain, as will all scalar branches. 5860 return TTI.getCFInstrCost(Instruction::Br); 5861 else 5862 // This branch will be eliminated by if-conversion. 5863 return 0; 5864 // Note: We currently assume zero cost for an unconditional branch inside 5865 // a predicated block since it will become a fall-through, although we 5866 // may decide in the future to call TTI for all branches. 5867 } 5868 case Instruction::PHI: { 5869 auto *Phi = cast<PHINode>(I); 5870 5871 // First-order recurrences are replaced by vector shuffles inside the loop. 5872 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 5873 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 5874 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5875 VectorTy, VF - 1, VectorType::get(RetTy, 1)); 5876 5877 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 5878 // converted into select instructions. We require N - 1 selects per phi 5879 // node, where N is the number of incoming values. 5880 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 5881 return (Phi->getNumIncomingValues() - 1) * 5882 TTI.getCmpSelInstrCost( 5883 Instruction::Select, ToVectorTy(Phi->getType(), VF), 5884 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 5885 5886 return TTI.getCFInstrCost(Instruction::PHI); 5887 } 5888 case Instruction::UDiv: 5889 case Instruction::SDiv: 5890 case Instruction::URem: 5891 case Instruction::SRem: 5892 // If we have a predicated instruction, it may not be executed for each 5893 // vector lane. Get the scalarization cost and scale this amount by the 5894 // probability of executing the predicated block. If the instruction is not 5895 // predicated, we fall through to the next case. 5896 if (VF > 1 && isScalarWithPredication(I)) { 5897 unsigned Cost = 0; 5898 5899 // These instructions have a non-void type, so account for the phi nodes 5900 // that we will create. This cost is likely to be zero. 
The phi node 5901 // cost, if any, should be scaled by the block probability because it 5902 // models a copy at the end of each predicated block. 5903 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 5904 5905 // The cost of the non-predicated instruction. 5906 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 5907 5908 // The cost of insertelement and extractelement instructions needed for 5909 // scalarization. 5910 Cost += getScalarizationOverhead(I, VF, TTI); 5911 5912 // Scale the cost by the probability of executing the predicated blocks. 5913 // This assumes the predicated block for each vector lane is equally 5914 // likely. 5915 return Cost / getReciprocalPredBlockProb(); 5916 } 5917 LLVM_FALLTHROUGH; 5918 case Instruction::Add: 5919 case Instruction::FAdd: 5920 case Instruction::Sub: 5921 case Instruction::FSub: 5922 case Instruction::Mul: 5923 case Instruction::FMul: 5924 case Instruction::FDiv: 5925 case Instruction::FRem: 5926 case Instruction::Shl: 5927 case Instruction::LShr: 5928 case Instruction::AShr: 5929 case Instruction::And: 5930 case Instruction::Or: 5931 case Instruction::Xor: { 5932 // Since we will replace the stride by 1 the multiplication should go away. 5933 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 5934 return 0; 5935 // Certain instructions can be cheaper to vectorize if they have a constant 5936 // second vector operand. One example of this are shifts on x86. 5937 Value *Op2 = I->getOperand(1); 5938 TargetTransformInfo::OperandValueProperties Op2VP; 5939 TargetTransformInfo::OperandValueKind Op2VK = 5940 TTI.getOperandInfo(Op2, Op2VP); 5941 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 5942 Op2VK = TargetTransformInfo::OK_UniformValue; 5943 5944 SmallVector<const Value *, 4> Operands(I->operand_values()); 5945 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 5946 return N * TTI.getArithmeticInstrCost( 5947 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 5948 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands); 5949 } 5950 case Instruction::Select: { 5951 SelectInst *SI = cast<SelectInst>(I); 5952 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 5953 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 5954 Type *CondTy = SI->getCondition()->getType(); 5955 if (!ScalarCond) 5956 CondTy = VectorType::get(CondTy, VF); 5957 5958 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 5959 } 5960 case Instruction::ICmp: 5961 case Instruction::FCmp: { 5962 Type *ValTy = I->getOperand(0)->getType(); 5963 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 5964 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 5965 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 5966 VectorTy = ToVectorTy(ValTy, VF); 5967 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 5968 } 5969 case Instruction::Store: 5970 case Instruction::Load: { 5971 unsigned Width = VF; 5972 if (Width > 1) { 5973 InstWidening Decision = getWideningDecision(I, Width); 5974 assert(Decision != CM_Unknown && 5975 "CM decision should be taken at this point"); 5976 if (Decision == CM_Scalarize) 5977 Width = 1; 5978 } 5979 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 5980 return getMemoryInstructionCost(I, VF); 5981 } 5982 case Instruction::ZExt: 5983 case Instruction::SExt: 5984 case Instruction::FPToUI: 5985 case Instruction::FPToSI: 5986 case Instruction::FPExt: 5987 case Instruction::PtrToInt: 5988 case Instruction::IntToPtr: 5989 case Instruction::SIToFP: 5990 case Instruction::UIToFP: 5991 case Instruction::Trunc: 5992 case Instruction::FPTrunc: 5993 case Instruction::BitCast: { 5994 // We optimize the truncation of induction variables having constant 5995 // integer steps. The cost of these truncations is the same as the scalar 5996 // operation. 5997 if (isOptimizableIVTruncate(I, VF)) { 5998 auto *Trunc = cast<TruncInst>(I); 5999 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 6000 Trunc->getSrcTy(), Trunc); 6001 } 6002 6003 Type *SrcScalarTy = I->getOperand(0)->getType(); 6004 Type *SrcVecTy = 6005 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6006 if (canTruncateToMinimalBitwidth(I, VF)) { 6007 // This cast is going to be shrunk. This may remove the cast or it might 6008 // turn it into slightly different cast. For example, if MinBW == 16, 6009 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6010 // 6011 // Calculate the modified src and dest types. 6012 Type *MinVecTy = VectorTy; 6013 if (I->getOpcode() == Instruction::Trunc) { 6014 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6015 VectorTy = 6016 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6017 } else if (I->getOpcode() == Instruction::ZExt || 6018 I->getOpcode() == Instruction::SExt) { 6019 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6020 VectorTy = 6021 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6022 } 6023 } 6024 6025 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6026 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 6027 } 6028 case Instruction::Call: { 6029 bool NeedToScalarize; 6030 CallInst *CI = cast<CallInst>(I); 6031 unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize); 6032 if (getVectorIntrinsicIDForCall(CI, TLI)) 6033 return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI)); 6034 return CallCost; 6035 } 6036 default: 6037 // The cost of executing VF copies of the scalar instruction. This opcode 6038 // is unknown. Assume that it is the same as 'mul'. 6039 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6040 getScalarizationOverhead(I, VF, TTI); 6041 } // end of switch. 6042 } 6043 6044 char LoopVectorize::ID = 0; 6045 6046 static const char lv_name[] = "Loop Vectorization"; 6047 6048 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6049 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6050 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6051 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6052 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6053 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6054 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6055 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6056 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6057 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6058 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6059 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6060 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6061 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 6062 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6063 6064 namespace llvm { 6065 6066 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6067 bool VectorizeOnlyWhenForced) { 6068 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6069 } 6070 6071 } // end namespace llvm 6072 6073 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6074 // Check if the pointer operand of a load or store instruction is 6075 // consecutive. 6076 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6077 return Legal->isConsecutivePtr(Ptr); 6078 return false; 6079 } 6080 6081 void LoopVectorizationCostModel::collectValuesToIgnore() { 6082 // Ignore ephemeral values. 6083 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6084 6085 // Ignore type-promoting instructions we identified during reduction 6086 // detection. 6087 for (auto &Reduction : *Legal->getReductionVars()) { 6088 RecurrenceDescriptor &RedDes = Reduction.second; 6089 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6090 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6091 } 6092 // Ignore type-casting instructions we identified during induction 6093 // detection. 6094 for (auto &Induction : *Legal->getInductionVars()) { 6095 InductionDescriptor &IndDes = Induction.second; 6096 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6097 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6098 } 6099 } 6100 6101 // TODO: we could return a pair of values that specify the max VF and 6102 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 6103 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 6104 // doesn't have a cost model that can choose which plan to execute if 6105 // more than one is generated. 
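// determineVPlanVF below simply divides the widest vector register width by
// the widest scalar type used in the loop; for instance, 256-bit registers
// with a widest type of 32 bits would yield a VF of 8.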
6106 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
6107                                  LoopVectorizationCostModel &CM) {
6108   unsigned WidestType;
6109   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
6110   return WidestVectorRegBits / WidestType;
6111 }
6112
6113 VectorizationFactor
6114 LoopVectorizationPlanner::planInVPlanNativePath(bool OptForSize,
6115                                                 unsigned UserVF) {
6116   unsigned VF = UserVF;
6117   // Outer loop handling: outer loops may require CFG and instruction level
6118   // transformations before even evaluating whether vectorization is profitable.
6119   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
6120   // the vectorization pipeline.
6121   if (!OrigLoop->empty()) {
6122     // If the user doesn't provide a vectorization factor, determine a
6123     // reasonable one.
6124     if (!UserVF) {
6125       VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector */), CM);
6126       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
6127
6128       // Make sure we have a VF > 1 for stress testing.
6129       if (VPlanBuildStressTest && VF < 2) {
6130         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
6131                           << "overriding computed VF.\n");
6132         VF = 4;
6133       }
6134     }
6135     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
6136     assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
6137     LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
6138                       << " to build VPlans.\n");
6139     buildVPlans(VF, VF);
6140
6141     // For VPlan build stress testing, we bail out after VPlan construction.
6142     if (VPlanBuildStressTest)
6143       return VectorizationFactor::Disabled();
6144
6145     return {VF, 0};
6146   }
6147
6148   LLVM_DEBUG(
6149       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
6150                 "VPlan-native path.\n");
6151   return VectorizationFactor::Disabled();
6152 }
6153
6154 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(bool OptForSize,
6155                                                              unsigned UserVF) {
6156   assert(OrigLoop->empty() && "Inner loop expected.");
6157   Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
6158   if (!MaybeMaxVF) // Cases that should not be vectorized nor interleaved.
6159     return None;
6160
6161   // Invalidate interleave groups if all blocks of the loop will be predicated.
6162   if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
6163       !useMaskedInterleavedAccesses(*TTI)) {
6164     LLVM_DEBUG(
6165         dbgs()
6166         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
6167            "which requires masked-interleaved support.\n");
6168     CM.InterleaveInfo.reset();
6169   }
6170
6171   if (UserVF) {
6172     LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
6173     assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
6174     // Collect the instructions (and their associated costs) that will be more
6175     // profitable to scalarize.
6176     CM.selectUserVectorizationFactor(UserVF);
6177     buildVPlansWithVPRecipes(UserVF, UserVF);
6178     LLVM_DEBUG(printPlans(dbgs()));
6179     return {{UserVF, 0}};
6180   }
6181
6182   unsigned MaxVF = MaybeMaxVF.getValue();
6183   assert(MaxVF != 0 && "MaxVF is zero.");
6184
6185   for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
6186     // Collect Uniform and Scalar instructions after vectorization with VF.
6187     CM.collectUniformsAndScalars(VF);
6188
6189     // Collect the instructions (and their associated costs) that will be more
6190     // profitable to scalarize.
6191     if (VF > 1)
6192       CM.collectInstsToScalarize(VF);
6193   }
6194
6195   buildVPlansWithVPRecipes(1, MaxVF);
6196   LLVM_DEBUG(printPlans(dbgs()));
6197   if (MaxVF == 1)
6198     return VectorizationFactor::Disabled();
6199
6200   // Select the optimal vectorization factor.
6201   return CM.selectVectorizationFactor(MaxVF);
6202 }
6203
6204 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
6205   LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
6206                     << '\n');
6207   BestVF = VF;
6208   BestUF = UF;
6209
6210   erase_if(VPlans, [VF](const VPlanPtr &Plan) {
6211     return !Plan->hasVF(VF);
6212   });
6213   assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
6214 }
6215
6216 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
6217                                            DominatorTree *DT) {
6218   // Perform the actual loop transformation.
6219
6220   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
6221   VPCallbackILV CallbackILV(ILV);
6222
6223   VPTransformState State{BestVF, BestUF, LI,
6224                          DT, ILV.Builder, ILV.VectorLoopValueMap,
6225                          &ILV, CallbackILV};
6226   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
6227   State.TripCount = ILV.getOrCreateTripCount(nullptr);
6228
6229   //===------------------------------------------------===//
6230   //
6231   // Notice: any optimization or new instruction that goes
6232   // into the code below should also be implemented in
6233   // the cost-model.
6234   //
6235   //===------------------------------------------------===//
6236
6237   // 2. Copy and widen instructions from the old loop into the new loop.
6238   assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
6239   VPlans.front()->execute(&State);
6240
6241   // 3. Fix the vectorized code: take care of header phis, live-outs,
6242   // predication, updating analyses.
6243   ILV.fixVectorizedLoop();
6244 }
6245
6246 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
6247     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
6248   BasicBlock *Latch = OrigLoop->getLoopLatch();
6249
6250   // We create new control-flow for the vectorized loop, so the original
6251   // condition will be dead after vectorization if it's only used by the
6252   // branch.
6253   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
6254   if (Cmp && Cmp->hasOneUse())
6255     DeadInstructions.insert(Cmp);
6256
6257   // We create new "steps" for induction variable updates to which the original
6258   // induction variables map. An original update instruction will be dead if
6259   // all its users except the induction variable are dead.
6260   for (auto &Induction : *Legal->getInductionVars()) {
6261     PHINode *Ind = Induction.first;
6262     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
6263     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
6264           return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
6265                                  DeadInstructions.end();
6266         }))
6267       DeadInstructions.insert(IndUpdate);
6268
6269     // We also record as "Dead" the type-casting instructions we identified
6270     // during induction analysis. We don't need any handling for them in the
6271     // vectorized loop because we have proven that, under a proper runtime
6272     // test guarding the vectorized loop, the value of the phi and the casted
6273     // value of the phi are the same. The last instruction in this casting chain
6274     // will get its scalar/vector/widened def from the scalar/vector/widened def
6275     // of the respective phi node.
Any other casts in the induction def-use chain 6276 // have no other uses outside the phi update chain, and will be ignored. 6277 InductionDescriptor &IndDes = Induction.second; 6278 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6279 DeadInstructions.insert(Casts.begin(), Casts.end()); 6280 } 6281 } 6282 6283 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6284 6285 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6286 6287 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6288 Instruction::BinaryOps BinOp) { 6289 // When unrolling and the VF is 1, we only need to add a simple scalar. 6290 Type *Ty = Val->getType(); 6291 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6292 6293 if (Ty->isFloatingPointTy()) { 6294 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6295 6296 // Floating point operations had to be 'fast' to enable the unrolling. 6297 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6298 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6299 } 6300 Constant *C = ConstantInt::get(Ty, StartIdx); 6301 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6302 } 6303 6304 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6305 SmallVector<Metadata *, 4> MDs; 6306 // Reserve first location for self reference to the LoopID metadata node. 6307 MDs.push_back(nullptr); 6308 bool IsUnrollMetadata = false; 6309 MDNode *LoopID = L->getLoopID(); 6310 if (LoopID) { 6311 // First find existing loop unrolling disable metadata. 6312 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6313 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6314 if (MD) { 6315 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6316 IsUnrollMetadata = 6317 S && S->getString().startswith("llvm.loop.unroll.disable"); 6318 } 6319 MDs.push_back(LoopID->getOperand(i)); 6320 } 6321 } 6322 6323 if (!IsUnrollMetadata) { 6324 // Add runtime unroll disable metadata. 6325 LLVMContext &Context = L->getHeader()->getContext(); 6326 SmallVector<Metadata *, 1> DisableOperands; 6327 DisableOperands.push_back( 6328 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6329 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6330 MDs.push_back(DisableNode); 6331 MDNode *NewLoopID = MDNode::get(Context, MDs); 6332 // Set operand 0 to refer to the loop id itself. 6333 NewLoopID->replaceOperandWith(0, NewLoopID); 6334 L->setLoopID(NewLoopID); 6335 } 6336 } 6337 6338 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6339 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6340 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6341 bool PredicateAtRangeStart = Predicate(Range.Start); 6342 6343 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6344 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6345 Range.End = TmpVF; 6346 break; 6347 } 6348 6349 return PredicateAtRangeStart; 6350 } 6351 6352 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6353 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6354 /// of VF's starting at a given VF and extending it as much as possible. Each 6355 /// vectorization decision can potentially shorten this sub-range during 6356 /// buildVPlan(). 
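///
/// For instance (hypothetical clamping): with MinVF = 2 and MaxVF = 16, the
/// first call might have its sub-range trimmed to [2, 8), producing one VPlan
/// that covers VF 2 and 4; the loop then resumes at VF = 8 and builds a second
/// VPlan covering VF 8 and 16.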
6357 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6358 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6359 VFRange SubRange = {VF, MaxVF + 1}; 6360 VPlans.push_back(buildVPlan(SubRange)); 6361 VF = SubRange.End; 6362 } 6363 } 6364 6365 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6366 VPlanPtr &Plan) { 6367 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6368 6369 // Look for cached value. 6370 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6371 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6372 if (ECEntryIt != EdgeMaskCache.end()) 6373 return ECEntryIt->second; 6374 6375 VPValue *SrcMask = createBlockInMask(Src, Plan); 6376 6377 // The terminator has to be a branch inst! 6378 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6379 assert(BI && "Unexpected terminator found"); 6380 6381 if (!BI->isConditional()) 6382 return EdgeMaskCache[Edge] = SrcMask; 6383 6384 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6385 assert(EdgeMask && "No Edge Mask found for condition"); 6386 6387 if (BI->getSuccessor(0) != Dst) 6388 EdgeMask = Builder.createNot(EdgeMask); 6389 6390 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6391 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6392 6393 return EdgeMaskCache[Edge] = EdgeMask; 6394 } 6395 6396 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6397 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6398 6399 // Look for cached value. 6400 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6401 if (BCEntryIt != BlockMaskCache.end()) 6402 return BCEntryIt->second; 6403 6404 // All-one mask is modelled as no-mask following the convention for masked 6405 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6406 VPValue *BlockMask = nullptr; 6407 6408 if (OrigLoop->getHeader() == BB) { 6409 if (!CM.blockNeedsPredication(BB)) 6410 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6411 6412 // Introduce the early-exit compare IV <= BTC to form header block mask. 6413 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6414 VPValue *IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6415 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6416 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6417 return BlockMaskCache[BB] = BlockMask; 6418 } 6419 6420 // This is the block mask. We OR all incoming edges. 6421 for (auto *Predecessor : predecessors(BB)) { 6422 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6423 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6424 return BlockMaskCache[BB] = EdgeMask; 6425 6426 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6427 BlockMask = EdgeMask; 6428 continue; 6429 } 6430 6431 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6432 } 6433 6434 return BlockMaskCache[BB] = BlockMask; 6435 } 6436 6437 VPInterleaveRecipe *VPRecipeBuilder::tryToInterleaveMemory(Instruction *I, 6438 VFRange &Range, 6439 VPlanPtr &Plan) { 6440 const InterleaveGroup<Instruction> *IG = CM.getInterleavedAccessGroup(I); 6441 if (!IG) 6442 return nullptr; 6443 6444 // Now check if IG is relevant for VF's in the given range. 
6445 auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> { 6446 return [=](unsigned VF) -> bool { 6447 return (VF >= 2 && // Query is illegal for VF == 1 6448 CM.getWideningDecision(I, VF) == 6449 LoopVectorizationCostModel::CM_Interleave); 6450 }; 6451 }; 6452 if (!LoopVectorizationPlanner::getDecisionAndClampRange(isIGMember(I), Range)) 6453 return nullptr; 6454 6455 // I is a member of an InterleaveGroup for VF's in the (possibly trimmed) 6456 // range. If it's the primary member of the IG construct a VPInterleaveRecipe. 6457 // Otherwise, it's an adjunct member of the IG, do not construct any Recipe. 6458 assert(I == IG->getInsertPos() && 6459 "Generating a recipe for an adjunct member of an interleave group"); 6460 6461 VPValue *Mask = nullptr; 6462 if (Legal->isMaskRequired(I)) 6463 Mask = createBlockInMask(I->getParent(), Plan); 6464 6465 return new VPInterleaveRecipe(IG, Mask); 6466 } 6467 6468 VPWidenMemoryInstructionRecipe * 6469 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6470 VPlanPtr &Plan) { 6471 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 6472 return nullptr; 6473 6474 auto willWiden = [&](unsigned VF) -> bool { 6475 if (VF == 1) 6476 return false; 6477 if (CM.isScalarAfterVectorization(I, VF) || 6478 CM.isProfitableToScalarize(I, VF)) 6479 return false; 6480 LoopVectorizationCostModel::InstWidening Decision = 6481 CM.getWideningDecision(I, VF); 6482 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6483 "CM decision should be taken at this point."); 6484 assert(Decision != LoopVectorizationCostModel::CM_Interleave && 6485 "Interleave memory opportunity should be caught earlier."); 6486 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6487 }; 6488 6489 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6490 return nullptr; 6491 6492 VPValue *Mask = nullptr; 6493 if (Legal->isMaskRequired(I)) 6494 Mask = createBlockInMask(I->getParent(), Plan); 6495 6496 return new VPWidenMemoryInstructionRecipe(*I, Mask); 6497 } 6498 6499 VPWidenIntOrFpInductionRecipe * 6500 VPRecipeBuilder::tryToOptimizeInduction(Instruction *I, VFRange &Range) { 6501 if (PHINode *Phi = dyn_cast<PHINode>(I)) { 6502 // Check if this is an integer or fp induction. If so, build the recipe that 6503 // produces its scalar and vector values. 6504 InductionDescriptor II = Legal->getInductionVars()->lookup(Phi); 6505 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6506 II.getKind() == InductionDescriptor::IK_FpInduction) 6507 return new VPWidenIntOrFpInductionRecipe(Phi); 6508 6509 return nullptr; 6510 } 6511 6512 // Optimize the special case where the source is a constant integer 6513 // induction variable. Notice that we can only optimize the 'trunc' case 6514 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6515 // (c) other casts depend on pointer size. 6516 6517 // Determine whether \p K is a truncation based on an induction variable that 6518 // can be optimized. 
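  // For example (illustrative IR): given
  //   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
  //   %t  = trunc i64 %iv to i32
  // the truncation can be folded into the widened induction itself, so the
  // recipe is built from the truncated induction directly instead of widening
  // the trunc as a separate instruction.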
6519 auto isOptimizableIVTruncate = 6520 [&](Instruction *K) -> std::function<bool(unsigned)> { 6521 return 6522 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6523 }; 6524 6525 if (isa<TruncInst>(I) && LoopVectorizationPlanner::getDecisionAndClampRange( 6526 isOptimizableIVTruncate(I), Range)) 6527 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6528 cast<TruncInst>(I)); 6529 return nullptr; 6530 } 6531 6532 VPBlendRecipe *VPRecipeBuilder::tryToBlend(Instruction *I, VPlanPtr &Plan) { 6533 PHINode *Phi = dyn_cast<PHINode>(I); 6534 if (!Phi || Phi->getParent() == OrigLoop->getHeader()) 6535 return nullptr; 6536 6537 // We know that all PHIs in non-header blocks are converted into selects, so 6538 // we don't have to worry about the insertion order and we can just use the 6539 // builder. At this point we generate the predication tree. There may be 6540 // duplications since this is a simple recursive scan, but future 6541 // optimizations will clean it up. 6542 6543 SmallVector<VPValue *, 2> Masks; 6544 unsigned NumIncoming = Phi->getNumIncomingValues(); 6545 for (unsigned In = 0; In < NumIncoming; In++) { 6546 VPValue *EdgeMask = 6547 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6548 assert((EdgeMask || NumIncoming == 1) && 6549 "Multiple predecessors with one having a full mask"); 6550 if (EdgeMask) 6551 Masks.push_back(EdgeMask); 6552 } 6553 return new VPBlendRecipe(Phi, Masks); 6554 } 6555 6556 bool VPRecipeBuilder::tryToWiden(Instruction *I, VPBasicBlock *VPBB, 6557 VFRange &Range) { 6558 6559 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6560 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6561 6562 if (IsPredicated) 6563 return false; 6564 6565 auto IsVectorizableOpcode = [](unsigned Opcode) { 6566 switch (Opcode) { 6567 case Instruction::Add: 6568 case Instruction::And: 6569 case Instruction::AShr: 6570 case Instruction::BitCast: 6571 case Instruction::Br: 6572 case Instruction::Call: 6573 case Instruction::FAdd: 6574 case Instruction::FCmp: 6575 case Instruction::FDiv: 6576 case Instruction::FMul: 6577 case Instruction::FPExt: 6578 case Instruction::FPToSI: 6579 case Instruction::FPToUI: 6580 case Instruction::FPTrunc: 6581 case Instruction::FRem: 6582 case Instruction::FSub: 6583 case Instruction::GetElementPtr: 6584 case Instruction::ICmp: 6585 case Instruction::IntToPtr: 6586 case Instruction::Load: 6587 case Instruction::LShr: 6588 case Instruction::Mul: 6589 case Instruction::Or: 6590 case Instruction::PHI: 6591 case Instruction::PtrToInt: 6592 case Instruction::SDiv: 6593 case Instruction::Select: 6594 case Instruction::SExt: 6595 case Instruction::Shl: 6596 case Instruction::SIToFP: 6597 case Instruction::SRem: 6598 case Instruction::Store: 6599 case Instruction::Sub: 6600 case Instruction::Trunc: 6601 case Instruction::UDiv: 6602 case Instruction::UIToFP: 6603 case Instruction::URem: 6604 case Instruction::Xor: 6605 case Instruction::ZExt: 6606 return true; 6607 } 6608 return false; 6609 }; 6610 6611 if (!IsVectorizableOpcode(I->getOpcode())) 6612 return false; 6613 6614 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6615 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6616 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6617 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6618 return false; 6619 } 6620 6621 auto willWiden = [&](unsigned VF) -> bool { 6622 if (!isa<PHINode>(I) && 
(CM.isScalarAfterVectorization(I, VF) || 6623 CM.isProfitableToScalarize(I, VF))) 6624 return false; 6625 if (CallInst *CI = dyn_cast<CallInst>(I)) { 6626 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6627 // The following case may be scalarized depending on the VF. 6628 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6629 // version of the instruction. 6630 // Is it beneficial to perform intrinsic call compared to lib call? 6631 bool NeedToScalarize; 6632 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 6633 bool UseVectorIntrinsic = 6634 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 6635 return UseVectorIntrinsic || !NeedToScalarize; 6636 } 6637 if (isa<LoadInst>(I) || isa<StoreInst>(I)) { 6638 assert(CM.getWideningDecision(I, VF) == 6639 LoopVectorizationCostModel::CM_Scalarize && 6640 "Memory widening decisions should have been taken care by now"); 6641 return false; 6642 } 6643 return true; 6644 }; 6645 6646 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6647 return false; 6648 6649 // Success: widen this instruction. We optimize the common case where 6650 // consecutive instructions can be represented by a single recipe. 6651 if (!VPBB->empty()) { 6652 VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back()); 6653 if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I)) 6654 return true; 6655 } 6656 6657 VPBB->appendRecipe(new VPWidenRecipe(I)); 6658 return true; 6659 } 6660 6661 VPBasicBlock *VPRecipeBuilder::handleReplication( 6662 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 6663 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 6664 VPlanPtr &Plan) { 6665 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 6666 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 6667 Range); 6668 6669 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6670 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 6671 6672 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 6673 6674 // Find if I uses a predicated instruction. If so, it will use its scalar 6675 // value. Avoid hoisting the insert-element which packs the scalar value into 6676 // a vector value, as that happens iff all users use the vector value. 6677 for (auto &Op : I->operands()) 6678 if (auto *PredInst = dyn_cast<Instruction>(Op)) 6679 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 6680 PredInst2Recipe[PredInst]->setAlsoPack(false); 6681 6682 // Finalize the recipe for Instr, first if it is not predicated. 6683 if (!IsPredicated) { 6684 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 6685 VPBB->appendRecipe(Recipe); 6686 return VPBB; 6687 } 6688 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 6689 assert(VPBB->getSuccessors().empty() && 6690 "VPBB has successors when handling predicated replication."); 6691 // Record predicated instructions for above packing optimizations. 
6692 PredInst2Recipe[I] = Recipe; 6693 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 6694 VPBlockUtils::insertBlockAfter(Region, VPBB); 6695 auto *RegSucc = new VPBasicBlock(); 6696 VPBlockUtils::insertBlockAfter(RegSucc, Region); 6697 return RegSucc; 6698 } 6699 6700 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 6701 VPRecipeBase *PredRecipe, 6702 VPlanPtr &Plan) { 6703 // Instructions marked for predication are replicated and placed under an 6704 // if-then construct to prevent side-effects. 6705 6706 // Generate recipes to compute the block mask for this region. 6707 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 6708 6709 // Build the triangular if-then region. 6710 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 6711 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 6712 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 6713 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 6714 auto *PHIRecipe = 6715 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr); 6716 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 6717 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 6718 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 6719 6720 // Note: first set Entry as region entry and then connect successors starting 6721 // from it in order, to propagate the "parent" of each VPBasicBlock. 6722 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 6723 VPBlockUtils::connectBlocks(Pred, Exit); 6724 6725 return Region; 6726 } 6727 6728 bool VPRecipeBuilder::tryToCreateRecipe(Instruction *Instr, VFRange &Range, 6729 VPlanPtr &Plan, VPBasicBlock *VPBB) { 6730 VPRecipeBase *Recipe = nullptr; 6731 // Check if Instr should belong to an interleave memory recipe, or already 6732 // does. In the latter case Instr is irrelevant. 6733 if ((Recipe = tryToInterleaveMemory(Instr, Range, Plan))) { 6734 VPBB->appendRecipe(Recipe); 6735 return true; 6736 } 6737 6738 // Check if Instr is a memory operation that should be widened. 6739 if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) { 6740 VPBB->appendRecipe(Recipe); 6741 return true; 6742 } 6743 6744 // Check if Instr should form some PHI recipe. 6745 if ((Recipe = tryToOptimizeInduction(Instr, Range))) { 6746 VPBB->appendRecipe(Recipe); 6747 return true; 6748 } 6749 if ((Recipe = tryToBlend(Instr, Plan))) { 6750 VPBB->appendRecipe(Recipe); 6751 return true; 6752 } 6753 if (PHINode *Phi = dyn_cast<PHINode>(Instr)) { 6754 VPBB->appendRecipe(new VPWidenPHIRecipe(Phi)); 6755 return true; 6756 } 6757 6758 // Check if Instr is to be widened by a general VPWidenRecipe, after 6759 // having first checked for specific widening recipes that deal with 6760 // Interleave Groups, Inductions and Phi nodes. 6761 if (tryToWiden(Instr, VPBB, Range)) 6762 return true; 6763 6764 return false; 6765 } 6766 6767 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF, 6768 unsigned MaxVF) { 6769 assert(OrigLoop->empty() && "Inner loop expected."); 6770 6771 // Collect conditions feeding internal conditional branches; they need to be 6772 // represented in VPlan for it to model masking. 
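  // For instance (illustrative): for an in-loop branch on
  //   %cmp = icmp sgt i32 %a, 0
  // the compare feeds the edge masks created by createEdgeMask, so %cmp must
  // have a VPValue definition in the plan before recipes are built.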
6773 SmallPtrSet<Value *, 1> NeedDef; 6774 6775 auto *Latch = OrigLoop->getLoopLatch(); 6776 for (BasicBlock *BB : OrigLoop->blocks()) { 6777 if (BB == Latch) 6778 continue; 6779 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator()); 6780 if (Branch && Branch->isConditional()) 6781 NeedDef.insert(Branch->getCondition()); 6782 } 6783 6784 // If the tail is to be folded by masking, the primary induction variable 6785 // needs to be represented in VPlan for it to model early-exit masking. 6786 if (CM.foldTailByMasking()) 6787 NeedDef.insert(Legal->getPrimaryInduction()); 6788 6789 // Collect instructions from the original loop that will become trivially dead 6790 // in the vectorized loop. We don't need to vectorize these instructions. For 6791 // example, original induction update instructions can become dead because we 6792 // separately emit induction "steps" when generating code for the new loop. 6793 // Similarly, we create a new latch condition when setting up the structure 6794 // of the new loop, so the old one can become dead. 6795 SmallPtrSet<Instruction *, 4> DeadInstructions; 6796 collectTriviallyDeadInstructions(DeadInstructions); 6797 6798 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6799 VFRange SubRange = {VF, MaxVF + 1}; 6800 VPlans.push_back( 6801 buildVPlanWithVPRecipes(SubRange, NeedDef, DeadInstructions)); 6802 VF = SubRange.End; 6803 } 6804 } 6805 6806 LoopVectorizationPlanner::VPlanPtr 6807 LoopVectorizationPlanner::buildVPlanWithVPRecipes( 6808 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef, 6809 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6810 // Hold a mapping from predicated instructions to their recipes, in order to 6811 // fix their AlsoPack behavior if a user is determined to replicate and use a 6812 // scalar instead of vector value. 6813 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe; 6814 6815 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 6816 DenseMap<Instruction *, Instruction *> SinkAfterInverse; 6817 6818 // Create a dummy pre-entry VPBasicBlock to start building the VPlan. 6819 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry"); 6820 auto Plan = llvm::make_unique<VPlan>(VPBB); 6821 6822 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, TTI, Legal, CM, Builder); 6823 // Represent values that will have defs inside VPlan. 6824 for (Value *V : NeedDef) 6825 Plan->addVPValue(V); 6826 6827 // Scan the body of the loop in a topological order to visit each basic block 6828 // after having visited its predecessor basic blocks. 6829 LoopBlocksDFS DFS(OrigLoop); 6830 DFS.perform(LI); 6831 6832 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6833 // Relevant instructions from basic block BB will be grouped into VPRecipe 6834 // ingredients and fill a new VPBasicBlock. 6835 unsigned VPBBsForBB = 0; 6836 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName()); 6837 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB); 6838 VPBB = FirstVPBBForBB; 6839 Builder.setInsertPoint(VPBB); 6840 6841 std::vector<Instruction *> Ingredients; 6842 6843 // Organize the ingredients to vectorize from current basic block in the 6844 // right order. 6845 for (Instruction &I : BB->instructionsWithoutDebug()) { 6846 Instruction *Instr = &I; 6847 6848 // First filter out irrelevant instructions, to ensure no recipes are 6849 // built for them. 6850 if (isa<BranchInst>(Instr) || 6851 DeadInstructions.find(Instr) != DeadInstructions.end()) 6852 continue; 6853 6854 // I is a member of an InterleaveGroup for Range.Start. 
If it's an adjunct 6855 // member of the IG, do not construct any Recipe for it. 6856 const InterleaveGroup<Instruction> *IG = 6857 CM.getInterleavedAccessGroup(Instr); 6858 if (IG && Instr != IG->getInsertPos() && 6859 Range.Start >= 2 && // Query is illegal for VF == 1 6860 CM.getWideningDecision(Instr, Range.Start) == 6861 LoopVectorizationCostModel::CM_Interleave) { 6862 auto SinkCandidate = SinkAfterInverse.find(Instr); 6863 if (SinkCandidate != SinkAfterInverse.end()) 6864 Ingredients.push_back(SinkCandidate->second); 6865 continue; 6866 } 6867 6868 // Move instructions to handle first-order recurrences, step 1: avoid 6869 // handling this instruction until after we've handled the instruction it 6870 // should follow. 6871 auto SAIt = SinkAfter.find(Instr); 6872 if (SAIt != SinkAfter.end()) { 6873 LLVM_DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" 6874 << *SAIt->second 6875 << " to vectorize a 1st order recurrence.\n"); 6876 SinkAfterInverse[SAIt->second] = Instr; 6877 continue; 6878 } 6879 6880 Ingredients.push_back(Instr); 6881 6882 // Move instructions to handle first-order recurrences, step 2: push the 6883 // instruction to be sunk at its insertion point. 6884 auto SAInvIt = SinkAfterInverse.find(Instr); 6885 if (SAInvIt != SinkAfterInverse.end()) 6886 Ingredients.push_back(SAInvIt->second); 6887 } 6888 6889 // Introduce each ingredient into VPlan. 6890 for (Instruction *Instr : Ingredients) { 6891 if (RecipeBuilder.tryToCreateRecipe(Instr, Range, Plan, VPBB)) 6892 continue; 6893 6894 // Otherwise, if all widening options failed, Instruction is to be 6895 // replicated. This may create a successor for VPBB. 6896 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication( 6897 Instr, Range, VPBB, PredInst2Recipe, Plan); 6898 if (NextVPBB != VPBB) { 6899 VPBB = NextVPBB; 6900 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 6901 : ""); 6902 } 6903 } 6904 } 6905 6906 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 6907 // may also be empty, such as the last one VPBB, reflecting original 6908 // basic-blocks with no recipes. 6909 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 6910 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 6911 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 6912 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 6913 delete PreEntry; 6914 6915 std::string PlanName; 6916 raw_string_ostream RSO(PlanName); 6917 unsigned VF = Range.Start; 6918 Plan->addVF(VF); 6919 RSO << "Initial VPlan for VF={" << VF; 6920 for (VF *= 2; VF < Range.End; VF *= 2) { 6921 Plan->addVF(VF); 6922 RSO << "," << VF; 6923 } 6924 RSO << "},UF>=1"; 6925 RSO.flush(); 6926 Plan->setName(PlanName); 6927 6928 return Plan; 6929 } 6930 6931 LoopVectorizationPlanner::VPlanPtr 6932 LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 6933 // Outer loop handling: They may require CFG and instruction level 6934 // transformations before even evaluating whether vectorization is profitable. 6935 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6936 // the vectorization pipeline. 
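
// Illustrative sketch (not part of this pass): how the inner-loop planning
// loop above walks the [MinVF, MaxVF] range in power-of-two sub-ranges and
// derives a plan name of the form "Initial VPlan for VF={4,8},UF>=1". This is
// a simplified model in plain C++; the struct and helper names (SketchVFRange,
// planForAllVFs, ...) are hypothetical, and a real plan builder may shrink
// SubRange.End while deciding on recipes.
#if 0
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-in for VFRange: a half-open interval [Start, End) of VFs.
struct SketchVFRange { unsigned Start, End; };

static std::string namePlanForRange(const SketchVFRange &R) {
  std::string Name = "Initial VPlan for VF={" + std::to_string(R.Start);
  for (unsigned VF = R.Start * 2; VF < R.End; VF *= 2)
    Name += "," + std::to_string(VF);
  return Name + "},UF>=1";
}

static std::vector<std::string> planForAllVFs(unsigned MinVF, unsigned MaxVF) {
  std::vector<std::string> Plans;
  for (unsigned VF = MinVF; VF < MaxVF + 1;) {
    SketchVFRange SubRange = {VF, MaxVF + 1};
    Plans.push_back(namePlanForRange(SubRange));
    VF = SubRange.End; // Resume after the last VF covered by this plan.
  }
  return Plans;
}

int main() {
  // With MinVF=4 and MaxVF=8 a single plan covers VF=4 and VF=8.
  for (const std::string &P : planForAllVFs(4, 8))
    std::printf("%s\n", P.c_str());
}
#endif
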
6937 assert(!OrigLoop->empty()); 6938 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6939 6940 // Create new empty VPlan 6941 auto Plan = llvm::make_unique<VPlan>(); 6942 6943 // Build hierarchical CFG 6944 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 6945 HCFGBuilder.buildHierarchicalCFG(); 6946 6947 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 6948 Plan->addVF(VF); 6949 6950 if (EnableVPlanPredication) { 6951 VPlanPredicator VPP(*Plan); 6952 VPP.predicate(); 6953 6954 // Avoid running transformation to recipes until masked code generation in 6955 // VPlan-native path is in place. 6956 return Plan; 6957 } 6958 6959 SmallPtrSet<Instruction *, 1> DeadInstructions; 6960 VPlanHCFGTransforms::VPInstructionsToVPRecipes( 6961 Plan, Legal->getInductionVars(), DeadInstructions); 6962 6963 return Plan; 6964 } 6965 6966 Value* LoopVectorizationPlanner::VPCallbackILV:: 6967 getOrCreateVectorValues(Value *V, unsigned Part) { 6968 return ILV.getOrCreateVectorValue(V, Part); 6969 } 6970 6971 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const { 6972 O << " +\n" 6973 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 6974 IG->getInsertPos()->printAsOperand(O, false); 6975 if (User) { 6976 O << ", "; 6977 User->getOperand(0)->printAsOperand(O); 6978 } 6979 O << "\\l\""; 6980 for (unsigned i = 0; i < IG->getFactor(); ++i) 6981 if (Instruction *I = IG->getMember(i)) 6982 O << " +\n" 6983 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 6984 } 6985 6986 void VPWidenRecipe::execute(VPTransformState &State) { 6987 for (auto &Instr : make_range(Begin, End)) 6988 State.ILV->widenInstruction(Instr); 6989 } 6990 6991 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 6992 assert(!State.Instance && "Int or FP induction being replicated."); 6993 State.ILV->widenIntOrFpInduction(IV, Trunc); 6994 } 6995 6996 void VPWidenPHIRecipe::execute(VPTransformState &State) { 6997 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 6998 } 6999 7000 void VPBlendRecipe::execute(VPTransformState &State) { 7001 State.ILV->setDebugLocFromInst(State.Builder, Phi); 7002 // We know that all PHIs in non-header blocks are converted into 7003 // selects, so we don't have to worry about the insertion order and we 7004 // can just use the builder. 7005 // At this point we generate the predication tree. There may be 7006 // duplications since this is a simple recursive scan, but future 7007 // optimizations will clean it up. 7008 7009 unsigned NumIncoming = Phi->getNumIncomingValues(); 7010 7011 assert((User || NumIncoming == 1) && 7012 "Multiple predecessors with predecessors having a full mask"); 7013 // Generate a sequence of selects of the form: 7014 // SELECT(Mask3, In3, 7015 // SELECT(Mask2, In2, 7016 // ( ...))) 7017 InnerLoopVectorizer::VectorParts Entry(State.UF); 7018 for (unsigned In = 0; In < NumIncoming; ++In) { 7019 for (unsigned Part = 0; Part < State.UF; ++Part) { 7020 // We might have single edge PHIs (blocks) - use an identity 7021 // 'select' for the first PHI operand. 7022 Value *In0 = 7023 State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part); 7024 if (In == 0) 7025 Entry[Part] = In0; // Initialize with the first incoming value. 7026 else { 7027 // Select between the current value and the previous incoming edge 7028 // based on the incoming mask. 
        Value *Cond = State.get(User->getOperand(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  if (!User)
    return State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeInterleaveGroup(IG->getInsertPos(), &MaskValues);
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  if (!User) // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();
  else {
    VPValue *BlockInMask = User->getOperand(0);
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  }
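
// Illustrative sketch (not part of this pass): a scalar model of the select
// chain that VPBlendRecipe::execute above emits per unroll part, i.e.
// SELECT(Mask_{N-1}, In_{N-1}, ... SELECT(Mask_1, In_1, In_0)). The function
// and variable names below are hypothetical, and plain ints stand in for the
// widened vector values.
#if 0
#include <cassert>
#include <vector>

// One lane of one part: incoming values In[i] guarded by Mask[i]; In[0] plays
// the role of the identity 'select' for the first PHI operand.
static int blendLane(const std::vector<int> &In, const std::vector<bool> &Mask) {
  assert(!In.empty() && In.size() == Mask.size());
  int Entry = In[0];                 // Initialize with the first incoming value.
  for (size_t I = 1; I < In.size(); ++I)
    Entry = Mask[I] ? In[I] : Entry; // select(Mask[I], In[I], Entry) -> "predphi".
  return Entry;
}

int main() {
  // Three incoming edges: the last edge whose mask is set wins, else In[0].
  assert(blendLane({10, 20, 30}, {true, false, true}) == 30);
  assert(blendLane({10, 20, 30}, {true, false, false}) == 10);
  return 0;
}
#endif
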
  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst = cast<Instruction>(
      State.ValueMap.getScalarValue(PredInst, *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node: if
  // a vector value for the predicated instruction exists at this point it means
  // the instruction has vector users only, and a phi for the vector value is
  // needed. In this case the recipe of the predicated instruction is marked to
  // also do that packing, thereby "hoisting" the insert-element sequence.
  // Otherwise, a phi node for the scalar value is needed.
  unsigned Part = State.Instance->Part;
  if (State.ValueMap.hasVectorValue(PredInst, Part)) {
    Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
  } else {
    Type *PredInstType = PredInst->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  if (!User)
    return State.ILV->vectorizeMemoryInstruction(&Instr);

  // Last (and currently only) operand is a mask.
  InnerLoopVectorizer::VectorParts MaskValues(State.UF);
  VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    MaskValues[Part] = State.get(Mask, Part);
  State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
7151 static bool processLoopInVPlanNativePath( 7152 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 7153 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 7154 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 7155 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 7156 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 7157 7158 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 7159 Function *F = L->getHeader()->getParent(); 7160 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 7161 LoopVectorizationCostModel CM(L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 7162 &Hints, IAI); 7163 // Use the planner for outer loop vectorization. 7164 // TODO: CM is not used at this point inside the planner. Turn CM into an 7165 // optional argument if we don't need it in the future. 7166 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM); 7167 7168 // Get user vectorization factor. 7169 const unsigned UserVF = Hints.getWidth(); 7170 7171 // Check the function attributes and profiles to find out if this function 7172 // should be optimized for size. 7173 bool OptForSize = 7174 Hints.getForce() != LoopVectorizeHints::FK_Enabled && 7175 (F->hasOptSize() || 7176 llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI)); 7177 7178 // Plan how to best vectorize, return the best VF and its cost. 7179 const VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF); 7180 7181 // If we are stress testing VPlan builds, do not attempt to generate vector 7182 // code. Masked vector code generation support will follow soon. 7183 // Also, do not attempt to vectorize if no vector code will be produced. 7184 if (VPlanBuildStressTest || EnableVPlanPredication || 7185 VectorizationFactor::Disabled() == VF) 7186 return false; 7187 7188 LVP.setBestPlan(VF.Width, 1); 7189 7190 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 7191 &CM); 7192 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 7193 << L->getHeader()->getParent()->getName() << "\"\n"); 7194 LVP.executePlan(LB, DT); 7195 7196 // Mark the loop as already vectorized to avoid vectorizing again. 7197 Hints.setAlreadyVectorized(); 7198 7199 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7200 return true; 7201 } 7202 7203 bool LoopVectorizePass::processLoop(Loop *L) { 7204 assert((EnableVPlanNativePath || L->empty()) && 7205 "VPlan-native path is not enabled. Only process inner loops."); 7206 7207 #ifndef NDEBUG 7208 const std::string DebugLocStr = getDebugLocString(L); 7209 #endif /* NDEBUG */ 7210 7211 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 7212 << L->getHeader()->getParent()->getName() << "\" from " 7213 << DebugLocStr << "\n"); 7214 7215 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 7216 7217 LLVM_DEBUG( 7218 dbgs() << "LV: Loop hints:" 7219 << " force=" 7220 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 7221 ? "disabled" 7222 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 7223 ? "enabled" 7224 : "?")) 7225 << " width=" << Hints.getWidth() 7226 << " unroll=" << Hints.getInterleave() << "\n"); 7227 7228 // Function containing loop 7229 Function *F = L->getHeader()->getParent(); 7230 7231 // Looking at the diagnostic output is the only way to determine if a loop 7232 // was vectorized (other than looking at the IR or machine code), so it 7233 // is important to generate an optimization remark for each loop. Most of 7234 // these messages are generated as OptimizationRemarkAnalysis. 
Remarks 7235 // generated as OptimizationRemark and OptimizationRemarkMissed are 7236 // less verbose reporting vectorized loops and unvectorized loops that may 7237 // benefit from vectorization, respectively. 7238 7239 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 7240 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 7241 return false; 7242 } 7243 7244 PredicatedScalarEvolution PSE(*SE, *L); 7245 7246 // Check if it is legal to vectorize the loop. 7247 LoopVectorizationRequirements Requirements(*ORE); 7248 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, GetLAA, LI, ORE, 7249 &Requirements, &Hints, DB, AC); 7250 if (!LVL.canVectorize(EnableVPlanNativePath)) { 7251 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 7252 Hints.emitRemarkWithHints(); 7253 return false; 7254 } 7255 7256 // Check the function attributes and profiles to find out if this function 7257 // should be optimized for size. 7258 bool OptForSize = 7259 Hints.getForce() != LoopVectorizeHints::FK_Enabled && 7260 (F->hasOptSize() || 7261 llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI)); 7262 7263 // Entrance to the VPlan-native vectorization path. Outer loops are processed 7264 // here. They may require CFG and instruction level transformations before 7265 // even evaluating whether vectorization is profitable. Since we cannot modify 7266 // the incoming IR, we need to build VPlan upfront in the vectorization 7267 // pipeline. 7268 if (!L->empty()) 7269 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 7270 ORE, BFI, PSI, Hints); 7271 7272 assert(L->empty() && "Inner loop expected."); 7273 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 7274 // count by optimizing for size, to minimize overheads. 7275 // Prefer constant trip counts over profile data, over upper bound estimate. 7276 unsigned ExpectedTC = 0; 7277 bool HasExpectedTC = false; 7278 if (const SCEVConstant *ConstExits = 7279 dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) { 7280 const APInt &ExitsCount = ConstExits->getAPInt(); 7281 // We are interested in small values for ExpectedTC. Skip over those that 7282 // can't fit an unsigned. 7283 if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) { 7284 ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1; 7285 HasExpectedTC = true; 7286 } 7287 } 7288 // ExpectedTC may be large because it's bound by a variable. Check 7289 // profiling information to validate we should vectorize. 7290 if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) { 7291 auto EstimatedTC = getLoopEstimatedTripCount(L); 7292 if (EstimatedTC) { 7293 ExpectedTC = *EstimatedTC; 7294 HasExpectedTC = true; 7295 } 7296 } 7297 if (!HasExpectedTC) { 7298 ExpectedTC = SE->getSmallConstantMaxTripCount(L); 7299 HasExpectedTC = (ExpectedTC > 0); 7300 } 7301 7302 if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) { 7303 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 7304 << "This loop is worth vectorizing only if no scalar " 7305 << "iteration overheads are incurred."); 7306 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 7307 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 7308 else { 7309 LLVM_DEBUG(dbgs() << "\n"); 7310 // Loops with a very small trip count are considered for vectorization 7311 // under OptForSize, thereby making sure the cost of their loop body is 7312 // dominant, free of runtime guards and scalar iteration overheads. 
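
// Illustrative sketch (not part of this pass): the trip-count preference used
// above -- a constant backedge-taken count first, then a profile-based
// estimate, then the small constant maximum -- and the resulting decision to
// treat a tiny-trip-count loop as optimize-for-size unless vectorization was
// explicitly forced (the statement that follows applies that decision). All
// names and the example threshold below are hypothetical.
#if 0
#include <optional>

struct SketchTripCountInfo {
  std::optional<unsigned> ConstantBackedgeTakenCount; // Known exactly, e.g. from SCEV.
  std::optional<unsigned> ProfileEstimatedTripCount;  // Derived from branch weights.
  unsigned SmallConstantMaxTripCount = 0;             // Upper bound; 0 means unknown.
};

// Prefer a constant trip count, then profile data, then the upper-bound estimate.
static std::optional<unsigned> expectedTripCount(const SketchTripCountInfo &Info) {
  if (Info.ConstantBackedgeTakenCount)
    return *Info.ConstantBackedgeTakenCount + 1;
  if (Info.ProfileEstimatedTripCount)
    return *Info.ProfileEstimatedTripCount;
  if (Info.SmallConstantMaxTripCount > 0)
    return Info.SmallConstantMaxTripCount;
  return std::nullopt;
}

// A loop below the tiny-trip-count threshold is only worth vectorizing under
// optimize-for-size rules, unless the user explicitly forced vectorization.
static bool treatAsOptForSize(const SketchTripCountInfo &Info, bool ForcedByUser,
                              unsigned TinyTripCountThreshold) {
  std::optional<unsigned> TC = expectedTripCount(Info);
  return TC && *TC < TinyTripCountThreshold && !ForcedByUser;
}

int main() {
  SketchTripCountInfo Info;
  Info.ConstantBackedgeTakenCount = 2; // Trip count 3: tiny by most thresholds.
  return treatAsOptForSize(Info, /*ForcedByUser=*/false,
                           /*TinyTripCountThreshold=*/16) ? 0 : 1;
}
#endif
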
      OptForSize = true;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                         " attribute is used.\n");
    ORE->emit(createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    LLVM_DEBUG(
        dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createLVMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(OptForSize, UserVF);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;
  unsigned UserIC = Hints.getInterleave();

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
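
// Illustrative sketch (not part of this pass): the four-way outcome that the
// VectorizeLoop/InterleaveLoop flags above reduce to, assuming the planner
// found a feasible VF and the requirements check passed. The interleave-only
// branch above and the vectorization branch that follows act on this choice.
// Enum, struct, and function names are hypothetical.
#if 0
enum class SketchAction { None, InterleaveOnly, VectorizeOnly, VectorizeAndInterleave };

struct SketchDecision { SketchAction Action; unsigned IC; };

static SketchDecision decide(unsigned VFWidth, unsigned IC, unsigned UserIC) {
  bool VectorizeLoop = VFWidth > 1;  // A width of 1 means "not beneficial".
  bool InterleaveLoop = true;
  if (IC == 1 && UserIC <= 1)
    InterleaveLoop = false;          // Not beneficial and not requested.
  else if (IC > 1 && UserIC == 1)
    InterleaveLoop = false;          // Beneficial but explicitly disabled.
  if (UserIC > 0)
    IC = UserIC;                     // A user-provided count overrides the model.

  if (!VectorizeLoop && !InterleaveLoop)
    return {SketchAction::None, IC};           // Only missed-optimization remarks.
  if (!VectorizeLoop)
    return {SketchAction::InterleaveOnly, IC}; // InnerLoopUnroller path.
  if (!InterleaveLoop)
    return {SketchAction::VectorizeOnly, IC};
  return {SketchAction::VectorizeAndInterleave, IC}; // InnerLoopVectorizer path.
}

int main() {
  SketchDecision D = decide(/*VFWidth=*/4, /*IC=*/2, /*UserIC=*/0);
  return D.Action == SketchAction::VectorizeAndInterleave ? 0 : 1;
}
#endif
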
7493 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 7494 &LVL, &CM); 7495 LVP.executePlan(LB, DT); 7496 ++LoopsVectorized; 7497 7498 // Add metadata to disable runtime unrolling a scalar loop when there are 7499 // no runtime checks about strides and memory. A scalar loop that is 7500 // rarely used is not worth unrolling. 7501 if (!LB.areSafetyChecksAdded()) 7502 DisableRuntimeUnroll = true; 7503 7504 // Report the vectorization decision. 7505 ORE->emit([&]() { 7506 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 7507 L->getHeader()) 7508 << "vectorized loop (vectorization width: " 7509 << NV("VectorizationFactor", VF.Width) 7510 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 7511 }); 7512 } 7513 7514 Optional<MDNode *> RemainderLoopID = 7515 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7516 LLVMLoopVectorizeFollowupEpilogue}); 7517 if (RemainderLoopID.hasValue()) { 7518 L->setLoopID(RemainderLoopID.getValue()); 7519 } else { 7520 if (DisableRuntimeUnroll) 7521 AddRuntimeUnrollDisableMetaData(L); 7522 7523 // Mark the loop as already vectorized to avoid vectorizing again. 7524 Hints.setAlreadyVectorized(); 7525 } 7526 7527 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7528 return true; 7529 } 7530 7531 bool LoopVectorizePass::runImpl( 7532 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 7533 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 7534 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 7535 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 7536 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 7537 SE = &SE_; 7538 LI = &LI_; 7539 TTI = &TTI_; 7540 DT = &DT_; 7541 BFI = &BFI_; 7542 TLI = TLI_; 7543 AA = &AA_; 7544 AC = &AC_; 7545 GetLAA = &GetLAA_; 7546 DB = &DB_; 7547 ORE = &ORE_; 7548 PSI = PSI_; 7549 7550 // Don't attempt if 7551 // 1. the target claims to have no vector registers, and 7552 // 2. interleaving won't help ILP. 7553 // 7554 // The second condition is necessary because, even if the target has no 7555 // vector registers, loop vectorization may still enable scalar 7556 // interleaving. 7557 if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2) 7558 return false; 7559 7560 bool Changed = false; 7561 7562 // The vectorizer requires loops to be in simplified form. 7563 // Since simplification may add new inner loops, it has to run before the 7564 // legality and profitability checks. This means running the loop vectorizer 7565 // will simplify all loops, regardless of whether anything end up being 7566 // vectorized. 7567 for (auto &L : *LI) 7568 Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */); 7569 7570 // Build up a worklist of inner-loops to vectorize. This is necessary as 7571 // the act of vectorizing or partially unrolling a loop creates new loops 7572 // and can invalidate iterators across the loops. 7573 SmallVector<Loop *, 8> Worklist; 7574 7575 for (Loop *L : *LI) 7576 collectSupportedLoops(*L, LI, ORE, Worklist); 7577 7578 LoopsAnalyzed += Worklist.size(); 7579 7580 // Now walk the identified inner loops. 7581 while (!Worklist.empty()) { 7582 Loop *L = Worklist.pop_back_val(); 7583 7584 // For the inner loops we actually process, form LCSSA to simplify the 7585 // transform. 7586 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 7587 7588 Changed |= processLoop(L); 7589 } 7590 7591 // Process each loop nest in the function. 
7592 return Changed; 7593 } 7594 7595 PreservedAnalyses LoopVectorizePass::run(Function &F, 7596 FunctionAnalysisManager &AM) { 7597 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 7598 auto &LI = AM.getResult<LoopAnalysis>(F); 7599 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 7600 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 7601 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 7602 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 7603 auto &AA = AM.getResult<AAManager>(F); 7604 auto &AC = AM.getResult<AssumptionAnalysis>(F); 7605 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 7606 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7607 MemorySSA *MSSA = EnableMSSALoopDependency 7608 ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() 7609 : nullptr; 7610 7611 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 7612 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 7613 [&](Loop &L) -> const LoopAccessInfo & { 7614 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA}; 7615 return LAM.getResult<LoopAccessAnalysis>(L, AR); 7616 }; 7617 const ModuleAnalysisManager &MAM = 7618 AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager(); 7619 ProfileSummaryInfo *PSI = 7620 MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 7621 bool Changed = 7622 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 7623 if (!Changed) 7624 return PreservedAnalyses::all(); 7625 PreservedAnalyses PA; 7626 7627 // We currently do not preserve loopinfo/dominator analyses with outer loop 7628 // vectorization. Until this is addressed, mark these analyses as preserved 7629 // only for non-VPlan-native path. 7630 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 7631 if (!EnableVPlanNativePath) { 7632 PA.preserve<LoopAnalysis>(); 7633 PA.preserve<DominatorTreeAnalysis>(); 7634 } 7635 PA.preserve<BasicAA>(); 7636 PA.preserve<GlobalsAA>(); 7637 return PA; 7638 } 7639
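
// Illustrative sketch (not part of this pass): the callback pattern used in
// run() above, where the function-level pass wraps a per-loop analysis query
// in a std::function so the implementation can request LoopAccessInfo lazily,
// only for loops it actually processes. The types below are simplified
// stand-ins with hypothetical names, not the real pass-manager API.
#if 0
#include <functional>
#include <map>
#include <string>

struct SketchLoop { std::string Name; };
struct SketchLoopAccessInfo { bool HasUnsafeDeps = false; };

struct SketchLoopAnalysisManager {
  std::map<std::string, SketchLoopAccessInfo> Cache;
  const SketchLoopAccessInfo &getResult(const SketchLoop &L) {
    return Cache[L.Name]; // Computed on first use and cached thereafter.
  }
};

// The implementation only sees the callback, not the analysis manager itself.
static bool runImplSketch(
    std::function<const SketchLoopAccessInfo &(SketchLoop &)> GetLAA,
    SketchLoop &L) {
  const SketchLoopAccessInfo &LAI = GetLAA(L);
  return !LAI.HasUnsafeDeps; // In the real pass this would feed legality checks.
}

int main() {
  SketchLoopAnalysisManager LAM;
  SketchLoop L{"loop"};
  auto GetLAA = [&](SketchLoop &Lp) -> const SketchLoopAccessInfo & {
    return LAM.getResult(Lp);
  };
  return runImplSketch(GetLAA, L) ? 0 : 1;
}
#endif
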