//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Indicates that an epilogue is undesired and predication is preferred.
// This means that the vectorizer will try to fold the loop-tail (epilogue)
// into the loop and predicate the loop body accordingly.
static cl::opt<bool> PreferPredicateOverEpilog(
    "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
    cl::desc("Indicate that an epilogue is undesired, predication should be "
             "used instead."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
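/// For example, an interleave group of factor 3 whose members only access
/// indices 0 and 2 has a gap at index 1; masking the gap lanes keeps the wide
/// access from touching memory the original scalar loop never accesses.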
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
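  // (For example, on typical data layouts an i24 has a type size of 24 bits
  // but an allocation size of 32 bits, so arrays of i24 contain padding and
  // the type is treated as irregular.)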
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FMF);
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
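///
/// For example, with VF = 4 and UF = 2, each widened instruction of the
/// original loop body is emitted as two <4 x Ty> instructions per vector
/// iteration, and the new induction variable is advanced by VF * UF = 8.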
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}
  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, bool InvariantCond);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, unsigned UF, unsigned VF,
                bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.)
  /// Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                VPTransformState &State, VPValue *Addr,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Addr, VPValue *StoredValue,
                                  VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE;
       ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<unsigned> computeMaxVF();

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
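    // Uniforms is used as the marker here: an existing entry for this VF means
    // the widening decisions, uniform values and scalar values have already
    // been collected.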
    if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    MaybeAlign Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I, unsigned VF = 1);

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps, and there is nothing preventing us from
  /// creating a scalar epilogue.
  bool requiresScalarEpilogue() const {
    return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is allowed; it may be disallowed due to
  /// optsize or a loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
  unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
1354   unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1355
1356   /// The cost computation for interleaving group of memory instructions.
1357   unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1358
1359   /// The cost computation for Gather/Scatter instruction.
1360   unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1361
1362   /// The cost computation for widening instruction \p I with consecutive
1363   /// memory access.
1364   unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1365
1366   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1367   /// Load: scalar load + broadcast.
1368   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1369   /// element)
1370   unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1371
1372   /// Estimate the overhead of scalarizing an instruction. This is a
1373   /// convenience wrapper for the type-based getScalarizationOverhead API.
1374   unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1375
1376   /// Returns whether the instruction is a load or store and will be emitted
1377   /// as a vector operation.
1378   bool isConsecutiveLoadOrStore(Instruction *I);
1379
1380   /// Returns true if an artificially high cost for emulated masked memrefs
1381   /// should be used.
1382   bool useEmulatedMaskMemRefHack(Instruction *I);
1383
1384   /// Map of scalar integer values to the smallest bitwidth they can be legally
1385   /// represented as. The vector equivalents of these values should be truncated
1386   /// to this type.
1387   MapVector<Instruction *, uint64_t> MinBWs;
1388
1389   /// A type representing the costs for instructions if they were to be
1390   /// scalarized rather than vectorized. The entries are Instruction-Cost
1391   /// pairs.
1392   using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1393
1394   /// A set containing all BasicBlocks that are known to be present after
1395   /// vectorization as a predicated block.
1396   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1397
1398   /// Records whether it is allowed to have the original scalar loop execute at
1399   /// least once. This may be needed as a fallback loop in case runtime
1400   /// aliasing/dependence checks fail, or to handle the tail/remainder
1401   /// iterations when the trip count is unknown or doesn't divide by the VF,
1402   /// or as a peel-loop to handle gaps in interleave-groups.
1403   /// Under optsize and when the trip count is very small we don't allow any
1404   /// iterations to execute in the scalar loop.
1405   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1406
1407   /// All blocks of the loop are to be masked to fold the tail of the scalar iterations.
1408   bool FoldTailByMasking = false;
1409
1410   /// A map holding scalar costs for different vectorization factors. The
1411   /// presence of a cost for an instruction in the mapping indicates that the
1412   /// instruction will be scalarized when vectorizing with the associated
1413   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1414   DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1415
1416   /// Holds the instructions known to be uniform after vectorization.
1417   /// The data is collected per VF.
1418   DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1419
1420   /// Holds the instructions known to be scalar after vectorization.
1421   /// The data is collected per VF.
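  /// E.g. Scalars[4] holds the instructions that remain scalar when
  /// vectorizing with VF = 4; collectLoopScalars(4) must have been run before
  /// this map is queried for that VF. (Illustrative.)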
1422 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1423 1424 /// Holds the instructions (address computations) that are forced to be 1425 /// scalarized. 1426 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1427 1428 /// Returns the expected difference in cost from scalarizing the expression 1429 /// feeding a predicated instruction \p PredInst. The instructions to 1430 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1431 /// non-negative return value implies the expression will be scalarized. 1432 /// Currently, only single-use chains are considered for scalarization. 1433 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1434 unsigned VF); 1435 1436 /// Collect the instructions that are uniform after vectorization. An 1437 /// instruction is uniform if we represent it with a single scalar value in 1438 /// the vectorized loop corresponding to each vector iteration. Examples of 1439 /// uniform instructions include pointer operands of consecutive or 1440 /// interleaved memory accesses. Note that although uniformity implies an 1441 /// instruction will be scalar, the reverse is not true. In general, a 1442 /// scalarized instruction will be represented by VF scalar values in the 1443 /// vectorized loop, each corresponding to an iteration of the original 1444 /// scalar loop. 1445 void collectLoopUniforms(unsigned VF); 1446 1447 /// Collect the instructions that are scalar after vectorization. An 1448 /// instruction is scalar if it is known to be uniform or will be scalarized 1449 /// during vectorization. Non-uniform scalarized instructions will be 1450 /// represented by VF values in the vectorized loop, each corresponding to an 1451 /// iteration of the original scalar loop. 1452 void collectLoopScalars(unsigned VF); 1453 1454 /// Keeps cost model vectorization decision and cost for instructions. 1455 /// Right now it is used for memory instructions only. 1456 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1457 std::pair<InstWidening, unsigned>>; 1458 1459 DecisionList WideningDecisions; 1460 1461 /// Returns true if \p V is expected to be vectorized and it needs to be 1462 /// extracted. 1463 bool needsExtract(Value *V, unsigned VF) const { 1464 Instruction *I = dyn_cast<Instruction>(V); 1465 if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I)) 1466 return false; 1467 1468 // Assume we can vectorize V (and hence we need extraction) if the 1469 // scalars are not computed yet. This can happen, because it is called 1470 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1471 // the scalars are collected. That should be a safe assumption in most 1472 // cases, because we check if the operands have vectorizable types 1473 // beforehand in LoopVectorizationLegality. 1474 return Scalars.find(VF) == Scalars.end() || 1475 !isScalarAfterVectorization(I, VF); 1476 }; 1477 1478 /// Returns a range containing only operands needing to be extracted. 1479 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1480 unsigned VF) { 1481 return SmallVector<Value *, 4>(make_filter_range( 1482 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1483 } 1484 1485 public: 1486 /// The loop that we evaluate. 1487 Loop *TheLoop; 1488 1489 /// Predicated scalar evolution analysis. 1490 PredicatedScalarEvolution &PSE; 1491 1492 /// Loop Info analysis. 1493 LoopInfo *LI; 1494 1495 /// Vectorization legality. 
1496 LoopVectorizationLegality *Legal; 1497 1498 /// Vector target information. 1499 const TargetTransformInfo &TTI; 1500 1501 /// Target Library Info. 1502 const TargetLibraryInfo *TLI; 1503 1504 /// Demanded bits analysis. 1505 DemandedBits *DB; 1506 1507 /// Assumption cache. 1508 AssumptionCache *AC; 1509 1510 /// Interface to emit optimization remarks. 1511 OptimizationRemarkEmitter *ORE; 1512 1513 const Function *TheFunction; 1514 1515 /// Loop Vectorize Hint. 1516 const LoopVectorizeHints *Hints; 1517 1518 /// The interleave access information contains groups of interleaved accesses 1519 /// with the same stride and close to each other. 1520 InterleavedAccessInfo &InterleaveInfo; 1521 1522 /// Values to ignore in the cost model. 1523 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1524 1525 /// Values to ignore in the cost model when VF > 1. 1526 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1527 }; 1528 1529 } // end namespace llvm 1530 1531 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1532 // vectorization. The loop needs to be annotated with #pragma omp simd 1533 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1534 // vector length information is not provided, vectorization is not considered 1535 // explicit. Interleave hints are not allowed either. These limitations will be 1536 // relaxed in the future. 1537 // Please, note that we are currently forced to abuse the pragma 'clang 1538 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1539 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1540 // provides *explicit vectorization hints* (LV can bypass legal checks and 1541 // assume that vectorization is legal). However, both hints are implemented 1542 // using the same metadata (llvm.loop.vectorize, processed by 1543 // LoopVectorizeHints). This will be fixed in the future when the native IR 1544 // representation for pragma 'omp simd' is introduced. 1545 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1546 OptimizationRemarkEmitter *ORE) { 1547 assert(!OuterLp->empty() && "This is not an outer loop"); 1548 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1549 1550 // Only outer loops with an explicit vectorization hint are supported. 1551 // Unannotated outer loops are ignored. 1552 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1553 return false; 1554 1555 Function *Fn = OuterLp->getHeader()->getParent(); 1556 if (!Hints.allowVectorization(Fn, OuterLp, 1557 true /*VectorizeOnlyWhenForced*/)) { 1558 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1559 return false; 1560 } 1561 1562 if (Hints.getInterleave() > 1) { 1563 // TODO: Interleave support is future work. 1564 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1565 "outer loops.\n"); 1566 Hints.emitRemarkWithHints(); 1567 return false; 1568 } 1569 1570 return true; 1571 } 1572 1573 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1574 OptimizationRemarkEmitter *ORE, 1575 SmallVectorImpl<Loop *> &V) { 1576 // Collect inner loops and outer loops without irreducible control flow. For 1577 // now, only collect outer loops that have explicit vectorization hints. If we 1578 // are stress testing the VPlan H-CFG construction, we collect the outermost 1579 // loop of every loop nest. 
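// For example (illustrative), an outer loop annotated with
//   #pragma clang loop vectorize(enable) vectorize_width(4)
// is collected here when the VPlan-native path is enabled, while an
// unannotated outer loop is skipped and only its inner loops are visited.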
1580 if (L.empty() || VPlanBuildStressTest || 1581 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1582 LoopBlocksRPO RPOT(&L); 1583 RPOT.perform(LI); 1584 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1585 V.push_back(&L); 1586 // TODO: Collect inner loops inside marked outer loops in case 1587 // vectorization fails for the outer loop. Do not invoke 1588 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1589 // already known to be reducible. We can use an inherited attribute for 1590 // that. 1591 return; 1592 } 1593 } 1594 for (Loop *InnerL : L) 1595 collectSupportedLoops(*InnerL, LI, ORE, V); 1596 } 1597 1598 namespace { 1599 1600 /// The LoopVectorize Pass. 1601 struct LoopVectorize : public FunctionPass { 1602 /// Pass identification, replacement for typeid 1603 static char ID; 1604 1605 LoopVectorizePass Impl; 1606 1607 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1608 bool VectorizeOnlyWhenForced = false) 1609 : FunctionPass(ID), 1610 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1611 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1612 } 1613 1614 bool runOnFunction(Function &F) override { 1615 if (skipFunction(F)) 1616 return false; 1617 1618 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1619 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1620 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1621 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1622 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1623 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1624 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 1625 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1626 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1627 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1628 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1629 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1630 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1631 1632 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1633 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1634 1635 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1636 GetLAA, *ORE, PSI).MadeAnyChange; 1637 } 1638 1639 void getAnalysisUsage(AnalysisUsage &AU) const override { 1640 AU.addRequired<AssumptionCacheTracker>(); 1641 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1642 AU.addRequired<DominatorTreeWrapperPass>(); 1643 AU.addRequired<LoopInfoWrapperPass>(); 1644 AU.addRequired<ScalarEvolutionWrapperPass>(); 1645 AU.addRequired<TargetTransformInfoWrapperPass>(); 1646 AU.addRequired<AAResultsWrapperPass>(); 1647 AU.addRequired<LoopAccessLegacyAnalysis>(); 1648 AU.addRequired<DemandedBitsWrapperPass>(); 1649 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1650 AU.addRequired<InjectTLIMappingsLegacy>(); 1651 1652 // We currently do not preserve loopinfo/dominator analyses with outer loop 1653 // vectorization. Until this is addressed, mark these analyses as preserved 1654 // only for non-VPlan-native path. 1655 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
1656 if (!EnableVPlanNativePath) { 1657 AU.addPreserved<LoopInfoWrapperPass>(); 1658 AU.addPreserved<DominatorTreeWrapperPass>(); 1659 } 1660 1661 AU.addPreserved<BasicAAWrapperPass>(); 1662 AU.addPreserved<GlobalsAAWrapperPass>(); 1663 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1664 } 1665 }; 1666 1667 } // end anonymous namespace 1668 1669 //===----------------------------------------------------------------------===// 1670 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1671 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1672 //===----------------------------------------------------------------------===// 1673 1674 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1675 // We need to place the broadcast of invariant variables outside the loop, 1676 // but only if it's proven safe to do so. Else, broadcast will be inside 1677 // vector loop body. 1678 Instruction *Instr = dyn_cast<Instruction>(V); 1679 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1680 (!Instr || 1681 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1682 // Place the code for broadcasting invariant variables in the new preheader. 1683 IRBuilder<>::InsertPointGuard Guard(Builder); 1684 if (SafeToHoist) 1685 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1686 1687 // Broadcast the scalar into all locations in the vector. 1688 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1689 1690 return Shuf; 1691 } 1692 1693 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1694 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1695 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1696 "Expected either an induction phi-node or a truncate of it!"); 1697 Value *Start = II.getStartValue(); 1698 1699 // Construct the initial value of the vector IV in the vector loop preheader 1700 auto CurrIP = Builder.saveIP(); 1701 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1702 if (isa<TruncInst>(EntryVal)) { 1703 assert(Start->getType()->isIntegerTy() && 1704 "Truncation requires an integer type"); 1705 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1706 Step = Builder.CreateTrunc(Step, TruncType); 1707 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1708 } 1709 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1710 Value *SteppedStart = 1711 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1712 1713 // We create vector phi nodes for both integer and floating-point induction 1714 // variables. Here, we determine the kind of arithmetic we will perform. 1715 Instruction::BinaryOps AddOp; 1716 Instruction::BinaryOps MulOp; 1717 if (Step->getType()->isIntegerTy()) { 1718 AddOp = Instruction::Add; 1719 MulOp = Instruction::Mul; 1720 } else { 1721 AddOp = II.getInductionOpcode(); 1722 MulOp = Instruction::FMul; 1723 } 1724 1725 // Multiply the vectorization factor by the step using integer or 1726 // floating-point arithmetic as appropriate. 1727 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1728 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1729 1730 // Create a vector splat to use in the induction update. 1731 // 1732 // FIXME: If the step is non-constant, we create the vector splat with 1733 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1734 // handle a constant vector splat. 1735 Value *SplatVF = 1736 isa<Constant>(Mul) 1737 ? 
ConstantVector::getSplat({VF, false}, cast<Constant>(Mul)) 1738 : Builder.CreateVectorSplat(VF, Mul); 1739 Builder.restoreIP(CurrIP); 1740 1741 // We may need to add the step a number of times, depending on the unroll 1742 // factor. The last of those goes into the PHI. 1743 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1744 &*LoopVectorBody->getFirstInsertionPt()); 1745 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1746 Instruction *LastInduction = VecInd; 1747 for (unsigned Part = 0; Part < UF; ++Part) { 1748 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1749 1750 if (isa<TruncInst>(EntryVal)) 1751 addMetadata(LastInduction, EntryVal); 1752 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1753 1754 LastInduction = cast<Instruction>(addFastMathFlag( 1755 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1756 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1757 } 1758 1759 // Move the last step to the end of the latch block. This ensures consistent 1760 // placement of all induction updates. 1761 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1762 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1763 auto *ICmp = cast<Instruction>(Br->getCondition()); 1764 LastInduction->moveBefore(ICmp); 1765 LastInduction->setName("vec.ind.next"); 1766 1767 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1768 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1769 } 1770 1771 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1772 return Cost->isScalarAfterVectorization(I, VF) || 1773 Cost->isProfitableToScalarize(I, VF); 1774 } 1775 1776 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1777 if (shouldScalarizeInstruction(IV)) 1778 return true; 1779 auto isScalarInst = [&](User *U) -> bool { 1780 auto *I = cast<Instruction>(U); 1781 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1782 }; 1783 return llvm::any_of(IV->users(), isScalarInst); 1784 } 1785 1786 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1787 const InductionDescriptor &ID, const Instruction *EntryVal, 1788 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1789 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1790 "Expected either an induction phi-node or a truncate of it!"); 1791 1792 // This induction variable is not the phi from the original loop but the 1793 // newly-created IV based on the proof that casted Phi is equal to the 1794 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1795 // re-uses the same InductionDescriptor that original IV uses but we don't 1796 // have to do any recording in this case - that is done when original IV is 1797 // processed. 1798 if (isa<TruncInst>(EntryVal)) 1799 return; 1800 1801 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1802 if (Casts.empty()) 1803 return; 1804 // Only the first Cast instruction in the Casts vector is of interest. 1805 // The rest of the Casts (if exist) have no uses outside the 1806 // induction update chain itself. 
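  // E.g. (illustrative) when the induction phi is only used through a cast
  // that was proven equal to the phi under a runtime guard, the widened or
  // scalarized IV value is recorded for that first cast here, so that later
  // users of the cast pick up the new value.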
1807 Instruction *CastInst = *Casts.begin(); 1808 if (Lane < UINT_MAX) 1809 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1810 else 1811 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1812 } 1813 1814 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1815 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1816 "Primary induction variable must have an integer type"); 1817 1818 auto II = Legal->getInductionVars().find(IV); 1819 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 1820 1821 auto ID = II->second; 1822 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1823 1824 // The value from the original loop to which we are mapping the new induction 1825 // variable. 1826 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1827 1828 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1829 1830 // Generate code for the induction step. Note that induction steps are 1831 // required to be loop-invariant 1832 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 1833 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 1834 "Induction step should be loop invariant"); 1835 if (PSE.getSE()->isSCEVable(IV->getType())) { 1836 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1837 return Exp.expandCodeFor(Step, Step->getType(), 1838 LoopVectorPreHeader->getTerminator()); 1839 } 1840 return cast<SCEVUnknown>(Step)->getValue(); 1841 }; 1842 1843 // The scalar value to broadcast. This is derived from the canonical 1844 // induction variable. If a truncation type is given, truncate the canonical 1845 // induction variable and step. Otherwise, derive these values from the 1846 // induction descriptor. 1847 auto CreateScalarIV = [&](Value *&Step) -> Value * { 1848 Value *ScalarIV = Induction; 1849 if (IV != OldInduction) { 1850 ScalarIV = IV->getType()->isIntegerTy() 1851 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1852 : Builder.CreateCast(Instruction::SIToFP, Induction, 1853 IV->getType()); 1854 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1855 ScalarIV->setName("offset.idx"); 1856 } 1857 if (Trunc) { 1858 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1859 assert(Step->getType()->isIntegerTy() && 1860 "Truncation requires an integer step"); 1861 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1862 Step = Builder.CreateTrunc(Step, TruncType); 1863 } 1864 return ScalarIV; 1865 }; 1866 1867 // Create the vector values from the scalar IV, in the absence of creating a 1868 // vector IV. 1869 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 1870 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1871 for (unsigned Part = 0; Part < UF; ++Part) { 1872 Value *EntryPart = 1873 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1874 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1875 if (Trunc) 1876 addMetadata(EntryPart, Trunc); 1877 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1878 } 1879 }; 1880 1881 // Now do the actual transformations, and start with creating the step value. 1882 Value *Step = CreateStepValue(ID.getStep()); 1883 if (VF <= 1) { 1884 Value *ScalarIV = CreateScalarIV(Step); 1885 CreateSplatIV(ScalarIV, Step); 1886 return; 1887 } 1888 1889 // Determine if we want a scalar version of the induction variable. 
This is 1890 // true if the induction variable itself is not widened, or if it has at 1891 // least one user in the loop that is not widened. 1892 auto NeedsScalarIV = needsScalarInduction(EntryVal); 1893 if (!NeedsScalarIV) { 1894 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1895 return; 1896 } 1897 1898 // Try to create a new independent vector induction variable. If we can't 1899 // create the phi node, we will splat the scalar induction variable in each 1900 // loop iteration. 1901 if (!shouldScalarizeInstruction(EntryVal)) { 1902 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1903 Value *ScalarIV = CreateScalarIV(Step); 1904 // Create scalar steps that can be used by instructions we will later 1905 // scalarize. Note that the addition of the scalar steps will not increase 1906 // the number of instructions in the loop in the common case prior to 1907 // InstCombine. We will be trading one vector extract for each scalar step. 1908 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1909 return; 1910 } 1911 1912 // If we haven't yet vectorized the induction variable, splat the scalar 1913 // induction variable, and build the necessary step vectors. 1914 // TODO: Don't do it unless the vectorized IV is really required. 1915 Value *ScalarIV = CreateScalarIV(Step); 1916 CreateSplatIV(ScalarIV, Step); 1917 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1918 } 1919 1920 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1921 Instruction::BinaryOps BinOp) { 1922 // Create and check the types. 1923 auto *ValVTy = cast<VectorType>(Val->getType()); 1924 int VLen = ValVTy->getNumElements(); 1925 1926 Type *STy = Val->getType()->getScalarType(); 1927 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1928 "Induction Step must be an integer or FP"); 1929 assert(Step->getType() == STy && "Step has wrong type"); 1930 1931 SmallVector<Constant *, 8> Indices; 1932 1933 if (STy->isIntegerTy()) { 1934 // Create a vector of consecutive numbers from zero to VF. 1935 for (int i = 0; i < VLen; ++i) 1936 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1937 1938 // Add the consecutive indices to the vector value. 1939 Constant *Cv = ConstantVector::get(Indices); 1940 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1941 Step = Builder.CreateVectorSplat(VLen, Step); 1942 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1943 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1944 // which can be found from the original scalar operations. 1945 Step = Builder.CreateMul(Cv, Step); 1946 return Builder.CreateAdd(Val, Step, "induction"); 1947 } 1948 1949 // Floating point induction. 1950 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1951 "Binary Opcode should be specified for FP induction"); 1952 // Create a vector of consecutive numbers from zero to VF. 1953 for (int i = 0; i < VLen; ++i) 1954 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1955 1956 // Add the consecutive indices to the vector value. 1957 Constant *Cv = ConstantVector::get(Indices); 1958 1959 Step = Builder.CreateVectorSplat(VLen, Step); 1960 1961 // Floating point operations had to be 'fast' to enable the induction. 
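  // The FP path below mirrors the integer case above: e.g. for VF = 4 and
  // StartIdx = 0 it computes Val BinOp (<0.0, 1.0, 2.0, 3.0> * Step), with
  // fast-math flags set on the multiply and on the final operation.
  // (Illustrative.)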
1962 FastMathFlags Flags; 1963 Flags.setFast(); 1964 1965 Value *MulOp = Builder.CreateFMul(Cv, Step); 1966 if (isa<Instruction>(MulOp)) 1967 // Have to check, MulOp may be a constant 1968 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1969 1970 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1971 if (isa<Instruction>(BOp)) 1972 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1973 return BOp; 1974 } 1975 1976 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1977 Instruction *EntryVal, 1978 const InductionDescriptor &ID) { 1979 // We shouldn't have to build scalar steps if we aren't vectorizing. 1980 assert(VF > 1 && "VF should be greater than one"); 1981 1982 // Get the value type and ensure it and the step have the same integer type. 1983 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1984 assert(ScalarIVTy == Step->getType() && 1985 "Val and Step should have the same type"); 1986 1987 // We build scalar steps for both integer and floating-point induction 1988 // variables. Here, we determine the kind of arithmetic we will perform. 1989 Instruction::BinaryOps AddOp; 1990 Instruction::BinaryOps MulOp; 1991 if (ScalarIVTy->isIntegerTy()) { 1992 AddOp = Instruction::Add; 1993 MulOp = Instruction::Mul; 1994 } else { 1995 AddOp = ID.getInductionOpcode(); 1996 MulOp = Instruction::FMul; 1997 } 1998 1999 // Determine the number of scalars we need to generate for each unroll 2000 // iteration. If EntryVal is uniform, we only need to generate the first 2001 // lane. Otherwise, we generate all VF values. 2002 unsigned Lanes = 2003 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 2004 : VF; 2005 // Compute the scalar steps and save the results in VectorLoopValueMap. 2006 for (unsigned Part = 0; Part < UF; ++Part) { 2007 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2008 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2009 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2010 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2011 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2012 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2013 } 2014 } 2015 } 2016 2017 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2018 assert(V != Induction && "The new induction variable should not be used."); 2019 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2020 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2021 2022 // If we have a stride that is replaced by one, do it here. Defer this for 2023 // the VPlan-native path until we start running Legal checks in that path. 2024 if (!EnableVPlanNativePath && Legal->hasStride(V)) 2025 V = ConstantInt::get(V->getType(), 1); 2026 2027 // If we have a vector mapped to this value, return it. 2028 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2029 return VectorLoopValueMap.getVectorValue(V, Part); 2030 2031 // If the value has not been vectorized, check if it has been scalarized 2032 // instead. If it has been scalarized, and we actually need the value in 2033 // vector form, we will construct the vector values on demand. 2034 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2035 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2036 2037 // If we've scalarized a value, that value should be an instruction. 
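    // E.g. (illustrative) a scalarized, non-uniform value at VF = 4 has
    // scalar map entries for lanes 0..3 of each part; LastLane below is then
    // 3, and the insertelement packing emitted after that definition rebuilds
    // the <4 x Ty> vector form on demand.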
2038 auto *I = cast<Instruction>(V); 2039 2040 // If we aren't vectorizing, we can just copy the scalar map values over to 2041 // the vector map. 2042 if (VF == 1) { 2043 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2044 return ScalarValue; 2045 } 2046 2047 // Get the last scalar instruction we generated for V and Part. If the value 2048 // is known to be uniform after vectorization, this corresponds to lane zero 2049 // of the Part unroll iteration. Otherwise, the last instruction is the one 2050 // we created for the last vector lane of the Part unroll iteration. 2051 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2052 auto *LastInst = cast<Instruction>( 2053 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2054 2055 // Set the insert point after the last scalarized instruction. This ensures 2056 // the insertelement sequence will directly follow the scalar definitions. 2057 auto OldIP = Builder.saveIP(); 2058 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2059 Builder.SetInsertPoint(&*NewIP); 2060 2061 // However, if we are vectorizing, we need to construct the vector values. 2062 // If the value is known to be uniform after vectorization, we can just 2063 // broadcast the scalar value corresponding to lane zero for each unroll 2064 // iteration. Otherwise, we construct the vector values using insertelement 2065 // instructions. Since the resulting vectors are stored in 2066 // VectorLoopValueMap, we will only generate the insertelements once. 2067 Value *VectorValue = nullptr; 2068 if (Cost->isUniformAfterVectorization(I, VF)) { 2069 VectorValue = getBroadcastInstrs(ScalarValue); 2070 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2071 } else { 2072 // Initialize packing with insertelements to start from undef. 2073 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 2074 VectorLoopValueMap.setVectorValue(V, Part, Undef); 2075 for (unsigned Lane = 0; Lane < VF; ++Lane) 2076 packScalarIntoVectorValue(V, {Part, Lane}); 2077 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2078 } 2079 Builder.restoreIP(OldIP); 2080 return VectorValue; 2081 } 2082 2083 // If this scalar is unknown, assume that it is a constant or that it is 2084 // loop invariant. Broadcast V and save the value for future uses. 2085 Value *B = getBroadcastInstrs(V); 2086 VectorLoopValueMap.setVectorValue(V, Part, B); 2087 return B; 2088 } 2089 2090 Value * 2091 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2092 const VPIteration &Instance) { 2093 // If the value is not an instruction contained in the loop, it should 2094 // already be scalar. 2095 if (OrigLoop->isLoopInvariant(V)) 2096 return V; 2097 2098 assert(Instance.Lane > 0 2099 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2100 : true && "Uniform values only have lane zero"); 2101 2102 // If the value from the original loop has not been vectorized, it is 2103 // represented by UF x VF scalar values in the new loop. Return the requested 2104 // scalar value. 2105 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2106 return VectorLoopValueMap.getScalarValue(V, Instance); 2107 2108 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2109 // for the given unroll part. If this entry is not a vector type (i.e., the 2110 // vectorization factor is one), there is no need to generate an 2111 // extractelement instruction. 
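  // E.g. at VF = 4, lane 2 of the requested part is recovered below as
  //   %lane = extractelement <4 x Ty> %vec, i32 2
  // (illustrative IR).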
2112 auto *U = getOrCreateVectorValue(V, Instance.Part); 2113 if (!U->getType()->isVectorTy()) { 2114 assert(VF == 1 && "Value not scalarized has non-vector type"); 2115 return U; 2116 } 2117 2118 // Otherwise, the value from the original loop has been vectorized and is 2119 // represented by UF vector values. Extract and return the requested scalar 2120 // value from the appropriate vector lane. 2121 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2122 } 2123 2124 void InnerLoopVectorizer::packScalarIntoVectorValue( 2125 Value *V, const VPIteration &Instance) { 2126 assert(V != Induction && "The new induction variable should not be used."); 2127 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2128 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2129 2130 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2131 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2132 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2133 Builder.getInt32(Instance.Lane)); 2134 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2135 } 2136 2137 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2138 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2139 SmallVector<int, 8> ShuffleMask; 2140 for (unsigned i = 0; i < VF; ++i) 2141 ShuffleMask.push_back(VF - i - 1); 2142 2143 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2144 ShuffleMask, "reverse"); 2145 } 2146 2147 // Return whether we allow using masked interleave-groups (for dealing with 2148 // strided loads/stores that reside in predicated blocks, or for dealing 2149 // with gaps). 2150 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2151 // If an override option has been passed in for interleaved accesses, use it. 2152 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2153 return EnableMaskedInterleavedMemAccesses; 2154 2155 return TTI.enableMaskedInterleavedAccessVectorization(); 2156 } 2157 2158 // Try to vectorize the interleave group that \p Instr belongs to. 2159 // 2160 // E.g. Translate following interleaved load group (factor = 3): 2161 // for (i = 0; i < N; i+=3) { 2162 // R = Pic[i]; // Member of index 0 2163 // G = Pic[i+1]; // Member of index 1 2164 // B = Pic[i+2]; // Member of index 2 2165 // ... // do something to R, G, B 2166 // } 2167 // To: 2168 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2169 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2170 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2171 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2172 // 2173 // Or translate following interleaved store group (factor = 3): 2174 // for (i = 0; i < N; i+=3) { 2175 // ... 
do something to R, G, B 2176 // Pic[i] = R; // Member of index 0 2177 // Pic[i+1] = G; // Member of index 1 2178 // Pic[i+2] = B; // Member of index 2 2179 // } 2180 // To: 2181 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2182 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2183 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2184 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2185 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2186 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2187 const InterleaveGroup<Instruction> *Group, VPTransformState &State, 2188 VPValue *Addr, VPValue *BlockInMask) { 2189 Instruction *Instr = Group->getInsertPos(); 2190 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2191 2192 // Prepare for the vector type of the interleaved load/store. 2193 Type *ScalarTy = getMemInstValueType(Instr); 2194 unsigned InterleaveFactor = Group->getFactor(); 2195 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2196 2197 // Prepare for the new pointers. 2198 SmallVector<Value *, 2> AddrParts; 2199 unsigned Index = Group->getIndex(Instr); 2200 2201 // TODO: extend the masked interleaved-group support to reversed access. 2202 assert((!BlockInMask || !Group->isReverse()) && 2203 "Reversed masked interleave-group not supported."); 2204 2205 // If the group is reverse, adjust the index to refer to the last vector lane 2206 // instead of the first. We adjust the index from the first vector lane, 2207 // rather than directly getting the pointer for lane VF - 1, because the 2208 // pointer operand of the interleaved access is supposed to be uniform. For 2209 // uniform instructions, we're only required to generate a value for the 2210 // first vector lane in each unroll iteration. 2211 if (Group->isReverse()) 2212 Index += (VF - 1) * Group->getFactor(); 2213 2214 for (unsigned Part = 0; Part < UF; Part++) { 2215 Value *AddrPart = State.get(Addr, {Part, 0}); 2216 setDebugLocFromInst(Builder, AddrPart); 2217 2218 // Notice current instruction could be any index. Need to adjust the address 2219 // to the member of index 0. 2220 // 2221 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2222 // b = A[i]; // Member of index 0 2223 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2224 // 2225 // E.g. A[i+1] = a; // Member of index 1 2226 // A[i] = b; // Member of index 0 2227 // A[i+2] = c; // Member of index 2 (Current instruction) 2228 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2229 2230 bool InBounds = false; 2231 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2232 InBounds = gep->isInBounds(); 2233 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2234 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2235 2236 // Cast to the vector pointer type. 2237 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2238 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2239 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2240 } 2241 2242 setDebugLocFromInst(Builder, Instr); 2243 Value *UndefVec = UndefValue::get(VecTy); 2244 2245 Value *MaskForGaps = nullptr; 2246 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2247 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2248 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2249 } 2250 2251 // Vectorize the interleaved load group. 
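  // E.g. (illustrative) for factor 3 and VF = 4: each part performs one wide
  // <12 x Ty> load, and every present member is then extracted with a strided
  // shuffle mask (<0,3,6,9>, <1,4,7,10> or <2,5,8,11>), as in the example
  // above vectorizeInterleaveGroup.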
2252 if (isa<LoadInst>(Instr)) { 2253 // For each unroll part, create a wide load for the group. 2254 SmallVector<Value *, 2> NewLoads; 2255 for (unsigned Part = 0; Part < UF; Part++) { 2256 Instruction *NewLoad; 2257 if (BlockInMask || MaskForGaps) { 2258 assert(useMaskedInterleavedAccesses(*TTI) && 2259 "masked interleaved groups are not allowed."); 2260 Value *GroupMask = MaskForGaps; 2261 if (BlockInMask) { 2262 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2263 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2264 Value *ShuffledMask = Builder.CreateShuffleVector( 2265 BlockInMaskPart, Undefs, 2266 createReplicatedMask(InterleaveFactor, VF), "interleaved.mask"); 2267 GroupMask = MaskForGaps 2268 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2269 MaskForGaps) 2270 : ShuffledMask; 2271 } 2272 NewLoad = 2273 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2274 GroupMask, UndefVec, "wide.masked.vec"); 2275 } 2276 else 2277 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2278 Group->getAlign(), "wide.vec"); 2279 Group->addMetadata(NewLoad); 2280 NewLoads.push_back(NewLoad); 2281 } 2282 2283 // For each member in the group, shuffle out the appropriate data from the 2284 // wide loads. 2285 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2286 Instruction *Member = Group->getMember(I); 2287 2288 // Skip the gaps in the group. 2289 if (!Member) 2290 continue; 2291 2292 auto StrideMask = createStrideMask(I, InterleaveFactor, VF); 2293 for (unsigned Part = 0; Part < UF; Part++) { 2294 Value *StridedVec = Builder.CreateShuffleVector( 2295 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2296 2297 // If this member has different type, cast the result type. 2298 if (Member->getType() != ScalarTy) { 2299 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2300 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2301 } 2302 2303 if (Group->isReverse()) 2304 StridedVec = reverseVector(StridedVec); 2305 2306 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2307 } 2308 } 2309 return; 2310 } 2311 2312 // The sub vector type for current instruction. 2313 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2314 2315 // Vectorize the interleaved store group. 2316 for (unsigned Part = 0; Part < UF; Part++) { 2317 // Collect the stored vector from each member. 2318 SmallVector<Value *, 4> StoredVecs; 2319 for (unsigned i = 0; i < InterleaveFactor; i++) { 2320 // Interleaved store group doesn't allow a gap, so each index has a member 2321 Instruction *Member = Group->getMember(i); 2322 assert(Member && "Fail to get a member from an interleaved store group"); 2323 2324 Value *StoredVec = getOrCreateVectorValue( 2325 cast<StoreInst>(Member)->getValueOperand(), Part); 2326 if (Group->isReverse()) 2327 StoredVec = reverseVector(StoredVec); 2328 2329 // If this member has different type, cast it to a unified type. 2330 2331 if (StoredVec->getType() != SubVT) 2332 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2333 2334 StoredVecs.push_back(StoredVec); 2335 } 2336 2337 // Concatenate all vectors into a wide vector. 2338 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2339 2340 // Interleave the elements in the wide vector. 
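    // E.g. for factor 3 and VF = 4 the interleave mask used below is
    // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>, matching the store example
    // above. (Illustrative.)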
2341 Value *IVec = Builder.CreateShuffleVector( 2342 WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor), 2343 "interleaved.vec"); 2344 2345 Instruction *NewStoreInstr; 2346 if (BlockInMask) { 2347 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2348 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2349 Value *ShuffledMask = Builder.CreateShuffleVector( 2350 BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF), 2351 "interleaved.mask"); 2352 NewStoreInstr = Builder.CreateMaskedStore( 2353 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2354 } 2355 else 2356 NewStoreInstr = 2357 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2358 2359 Group->addMetadata(NewStoreInstr); 2360 } 2361 } 2362 2363 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2364 VPTransformState &State, 2365 VPValue *Addr, 2366 VPValue *StoredValue, 2367 VPValue *BlockInMask) { 2368 // Attempt to issue a wide load. 2369 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2370 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2371 2372 assert((LI || SI) && "Invalid Load/Store instruction"); 2373 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2374 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2375 2376 LoopVectorizationCostModel::InstWidening Decision = 2377 Cost->getWideningDecision(Instr, VF); 2378 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2379 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2380 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2381 "CM decision is not to widen the memory instruction"); 2382 2383 Type *ScalarDataTy = getMemInstValueType(Instr); 2384 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2385 // An alignment of 0 means target abi alignment. We need to use the scalar's 2386 // target abi alignment in such a case. 2387 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2388 const Align Alignment = 2389 DL.getValueOrABITypeAlignment(getLoadStoreAlignment(Instr), ScalarDataTy); 2390 2391 // Determine if the pointer operand of the access is either consecutive or 2392 // reverse consecutive. 2393 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2394 bool ConsecutiveStride = 2395 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2396 bool CreateGatherScatter = 2397 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2398 2399 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2400 // gather/scatter. Otherwise Decision should have been to Scalarize. 2401 assert((ConsecutiveStride || CreateGatherScatter) && 2402 "The instruction should be scalarized"); 2403 (void)ConsecutiveStride; 2404 2405 VectorParts BlockInMaskParts(UF); 2406 bool isMaskRequired = BlockInMask; 2407 if (isMaskRequired) 2408 for (unsigned Part = 0; Part < UF; ++Part) 2409 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2410 2411 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2412 // Calculate the pointer for the specific unroll-part. 2413 GetElementPtrInst *PartPtr = nullptr; 2414 2415 bool InBounds = false; 2416 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2417 InBounds = gep->isInBounds(); 2418 2419 if (Reverse) { 2420 // If the address is consecutive but reversed, then the 2421 // wide store needs to start at the last vector element. 
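      // E.g. for VF = 4 and Part = 0, the two GEPs below move the pointer
      // back by VF - 1 = 3 elements (offsets -Part * VF and 1 - VF), so the
      // wide access covers elements [i - 3, i]; the value itself is reversed
      // separately. (Illustrative.)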
2422 PartPtr = cast<GetElementPtrInst>( 2423 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2424 PartPtr->setIsInBounds(InBounds); 2425 PartPtr = cast<GetElementPtrInst>( 2426 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2427 PartPtr->setIsInBounds(InBounds); 2428 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2429 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2430 } else { 2431 PartPtr = cast<GetElementPtrInst>( 2432 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2433 PartPtr->setIsInBounds(InBounds); 2434 } 2435 2436 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2437 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2438 }; 2439 2440 // Handle Stores: 2441 if (SI) { 2442 setDebugLocFromInst(Builder, SI); 2443 2444 for (unsigned Part = 0; Part < UF; ++Part) { 2445 Instruction *NewSI = nullptr; 2446 Value *StoredVal = State.get(StoredValue, Part); 2447 if (CreateGatherScatter) { 2448 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2449 Value *VectorGep = State.get(Addr, Part); 2450 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2451 MaskPart); 2452 } else { 2453 if (Reverse) { 2454 // If we store to reverse consecutive memory locations, then we need 2455 // to reverse the order of elements in the stored value. 2456 StoredVal = reverseVector(StoredVal); 2457 // We don't want to update the value in the map as it might be used in 2458 // another expression. So don't call resetVectorValue(StoredVal). 2459 } 2460 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2461 if (isMaskRequired) 2462 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2463 BlockInMaskParts[Part]); 2464 else 2465 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2466 } 2467 addMetadata(NewSI, SI); 2468 } 2469 return; 2470 } 2471 2472 // Handle loads. 2473 assert(LI && "Must have a load instruction"); 2474 setDebugLocFromInst(Builder, LI); 2475 for (unsigned Part = 0; Part < UF; ++Part) { 2476 Value *NewLI; 2477 if (CreateGatherScatter) { 2478 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2479 Value *VectorGep = State.get(Addr, Part); 2480 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2481 nullptr, "wide.masked.gather"); 2482 addMetadata(NewLI, LI); 2483 } else { 2484 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2485 if (isMaskRequired) 2486 NewLI = Builder.CreateMaskedLoad( 2487 VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy), 2488 "wide.masked.load"); 2489 else 2490 NewLI = 2491 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2492 2493 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2494 addMetadata(NewLI, LI); 2495 if (Reverse) 2496 NewLI = reverseVector(NewLI); 2497 } 2498 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2499 } 2500 } 2501 2502 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2503 const VPIteration &Instance, 2504 bool IfPredicateInstr) { 2505 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2506 2507 setDebugLocFromInst(Builder, Instr); 2508 2509 // Does this instruction return a value ? 
2510 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2511 2512 Instruction *Cloned = Instr->clone(); 2513 if (!IsVoidRetTy) 2514 Cloned->setName(Instr->getName() + ".cloned"); 2515 2516 // Replace the operands of the cloned instructions with their scalar 2517 // equivalents in the new loop. 2518 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2519 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2520 Cloned->setOperand(op, NewOp); 2521 } 2522 addNewMetadata(Cloned, Instr); 2523 2524 // Place the cloned scalar in the new loop. 2525 Builder.Insert(Cloned); 2526 2527 // Add the cloned scalar to the scalar map entry. 2528 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2529 2530 // If we just cloned a new assumption, add it the assumption cache. 2531 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2532 if (II->getIntrinsicID() == Intrinsic::assume) 2533 AC->registerAssumption(II); 2534 2535 // End if-block. 2536 if (IfPredicateInstr) 2537 PredicatedInstructions.push_back(Cloned); 2538 } 2539 2540 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2541 Value *End, Value *Step, 2542 Instruction *DL) { 2543 BasicBlock *Header = L->getHeader(); 2544 BasicBlock *Latch = L->getLoopLatch(); 2545 // As we're just creating this loop, it's possible no latch exists 2546 // yet. If so, use the header as this will be a single block loop. 2547 if (!Latch) 2548 Latch = Header; 2549 2550 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2551 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2552 setDebugLocFromInst(Builder, OldInst); 2553 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2554 2555 Builder.SetInsertPoint(Latch->getTerminator()); 2556 setDebugLocFromInst(Builder, OldInst); 2557 2558 // Create i+1 and fill the PHINode. 2559 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2560 Induction->addIncoming(Start, L->getLoopPreheader()); 2561 Induction->addIncoming(Next, Latch); 2562 // Create the compare. 2563 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2564 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2565 2566 // Now we have two terminators. Remove the old one from the block. 2567 Latch->getTerminator()->eraseFromParent(); 2568 2569 return Induction; 2570 } 2571 2572 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2573 if (TripCount) 2574 return TripCount; 2575 2576 assert(L && "Create Trip Count for null loop."); 2577 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2578 // Find the loop boundaries. 2579 ScalarEvolution *SE = PSE.getSE(); 2580 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2581 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2582 "Invalid loop count"); 2583 2584 Type *IdxTy = Legal->getWidestInductionType(); 2585 assert(IdxTy && "No type for induction"); 2586 2587 // The exit count might have the type of i64 while the phi is i32. This can 2588 // happen if we have an induction variable that is sign extended before the 2589 // compare. The only way that we get a backedge taken count is that the 2590 // induction variable was signed and as such will not overflow. In such a case 2591 // truncation is legal. 
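  // E.g. (illustrative) for "for (i32 i = 0; i != n; ++i)" the backedge-taken
  // count is n - 1, and the trip count materialized below is (n - 1) + 1 == n,
  // truncated or zero-extended to the widest induction type as needed.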
2592 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2593 IdxTy->getPrimitiveSizeInBits()) 2594 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2595 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2596 2597 // Get the total trip count from the count by adding 1. 2598 const SCEV *ExitCount = SE->getAddExpr( 2599 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2600 2601 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2602 2603 // Expand the trip count and place the new instructions in the preheader. 2604 // Notice that the pre-header does not change, only the loop body. 2605 SCEVExpander Exp(*SE, DL, "induction"); 2606 2607 // Count holds the overall loop count (N). 2608 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2609 L->getLoopPreheader()->getTerminator()); 2610 2611 if (TripCount->getType()->isPointerTy()) 2612 TripCount = 2613 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2614 L->getLoopPreheader()->getTerminator()); 2615 2616 return TripCount; 2617 } 2618 2619 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2620 if (VectorTripCount) 2621 return VectorTripCount; 2622 2623 Value *TC = getOrCreateTripCount(L); 2624 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2625 2626 Type *Ty = TC->getType(); 2627 Constant *Step = ConstantInt::get(Ty, VF * UF); 2628 2629 // If the tail is to be folded by masking, round the number of iterations N 2630 // up to a multiple of Step instead of rounding down. This is done by first 2631 // adding Step-1 and then rounding down. Note that it's ok if this addition 2632 // overflows: the vector induction variable will eventually wrap to zero given 2633 // that it starts at zero and its Step is a power of two; the loop will then 2634 // exit, with the last early-exit vector comparison also producing all-true. 2635 if (Cost->foldTailByMasking()) { 2636 assert(isPowerOf2_32(VF * UF) && 2637 "VF*UF must be a power of 2 when folding tail by masking"); 2638 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2639 } 2640 2641 // Now we need to generate the expression for the part of the loop that the 2642 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2643 // iterations are not required for correctness, or N - Step, otherwise. Step 2644 // is equal to the vectorization factor (number of SIMD elements) times the 2645 // unroll factor (number of SIMD instructions). 2646 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2647 2648 // If there is a non-reversed interleaved group that may speculatively access 2649 // memory out-of-bounds, we need to ensure that there will be at least one 2650 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2651 // the trip count, we set the remainder to be equal to the step. If the step 2652 // does not evenly divide the trip count, no adjustment is necessary since 2653 // there will already be scalar iterations. Note that the minimum iterations 2654 // check ensures that N >= Step. 
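  // E.g. with N = 17 and VF * UF = 8: n.mod.vf = 1 and n.vec = 16. If a
  // scalar epilogue is required and N = 16, the remainder is bumped to 8
  // below so that n.vec = 8 and a full set of scalar iterations remains.
  // With tail folding, N was first rounded up to 24 and n.vec = 24.
  // (Illustrative.)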
2655 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2656 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2657 R = Builder.CreateSelect(IsZero, Step, R); 2658 } 2659 2660 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2661 2662 return VectorTripCount; 2663 } 2664 2665 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2666 const DataLayout &DL) { 2667 // Verify that V is a vector type with same number of elements as DstVTy. 2668 unsigned VF = DstVTy->getNumElements(); 2669 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2670 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2671 Type *SrcElemTy = SrcVecTy->getElementType(); 2672 Type *DstElemTy = DstVTy->getElementType(); 2673 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2674 "Vector elements must have same size"); 2675 2676 // Do a direct cast if element types are castable. 2677 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2678 return Builder.CreateBitOrPointerCast(V, DstVTy); 2679 } 2680 // V cannot be directly casted to desired vector type. 2681 // May happen when V is a floating point vector but DstVTy is a vector of 2682 // pointers or vice-versa. Handle this using a two-step bitcast using an 2683 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2684 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2685 "Only one type should be a pointer type"); 2686 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2687 "Only one type should be a floating point type"); 2688 Type *IntTy = 2689 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2690 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2691 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2692 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2693 } 2694 2695 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2696 BasicBlock *Bypass) { 2697 Value *Count = getOrCreateTripCount(L); 2698 // Reuse existing vector loop preheader for TC checks. 2699 // Note that new preheader block is generated for vector loop. 2700 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2701 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2702 2703 // Generate code to check if the loop's trip count is less than VF * UF, or 2704 // equal to it in case a scalar epilogue is required; this implies that the 2705 // vector trip count is zero. This check also covers the case where adding one 2706 // to the backedge-taken count overflowed leading to an incorrect trip count 2707 // of zero. In this case we will also jump to the scalar loop. 2708 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2709 : ICmpInst::ICMP_ULT; 2710 2711 // If tail is to be folded, vector loop takes care of all iterations. 2712 Value *CheckMinIters = Builder.getFalse(); 2713 if (!Cost->foldTailByMasking()) 2714 CheckMinIters = Builder.CreateICmp( 2715 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2716 "min.iters.check"); 2717 2718 // Create new preheader for vector loop. 2719 LoopVectorPreHeader = 2720 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2721 "vector.ph"); 2722 2723 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2724 DT->getNode(Bypass)->getIDom()) && 2725 "TC check is expected to dominate Bypass"); 2726 2727 // Update dominator for Bypass & LoopExit. 
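  // E.g. (illustrative) with VF * UF = 8, the min.iters.check branch
  // installed below takes the Bypass edge to the scalar loop whenever the
  // trip count is < 8 (or <= 8 when a scalar epilogue is required), and
  // otherwise falls through to the new vector preheader.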
2728 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2729 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2730 2731 ReplaceInstWithInst( 2732 TCCheckBlock->getTerminator(), 2733 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 2734 LoopBypassBlocks.push_back(TCCheckBlock); 2735 } 2736 2737 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2738 // Reuse existing vector loop preheader for SCEV checks. 2739 // Note that new preheader block is generated for vector loop. 2740 BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader; 2741 2742 // Generate the code to check that the SCEV assumptions that we made. 2743 // We want the new basic block to start at the first instruction in a 2744 // sequence of instructions that form a check. 2745 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2746 "scev.check"); 2747 Value *SCEVCheck = Exp.expandCodeForPredicate( 2748 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator()); 2749 2750 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2751 if (C->isZero()) 2752 return; 2753 2754 assert(!SCEVCheckBlock->getParent()->hasOptSize() && 2755 "Cannot SCEV check stride or overflow when optimizing for size"); 2756 2757 SCEVCheckBlock->setName("vector.scevcheck"); 2758 // Create new preheader for vector loop. 2759 LoopVectorPreHeader = 2760 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI, 2761 nullptr, "vector.ph"); 2762 2763 // Update dominator only if this is first RT check. 2764 if (LoopBypassBlocks.empty()) { 2765 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 2766 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 2767 } 2768 2769 ReplaceInstWithInst( 2770 SCEVCheckBlock->getTerminator(), 2771 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck)); 2772 LoopBypassBlocks.push_back(SCEVCheckBlock); 2773 AddedSafetyChecks = true; 2774 } 2775 2776 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2777 // VPlan-native path does not do any analysis for runtime checks currently. 2778 if (EnableVPlanNativePath) 2779 return; 2780 2781 // Reuse existing vector loop preheader for runtime memory checks. 2782 // Note that new preheader block is generated for vector loop. 2783 BasicBlock *const MemCheckBlock = L->getLoopPreheader(); 2784 2785 // Generate the code that checks in runtime if arrays overlap. We put the 2786 // checks into a separate block to make the more common case of few elements 2787 // faster. 2788 Instruction *FirstCheckInst; 2789 Instruction *MemRuntimeCheck; 2790 std::tie(FirstCheckInst, MemRuntimeCheck) = 2791 Legal->getLAI()->addRuntimeChecks(MemCheckBlock->getTerminator()); 2792 if (!MemRuntimeCheck) 2793 return; 2794 2795 if (MemCheckBlock->getParent()->hasOptSize()) { 2796 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 2797 "Cannot emit memory checks when optimizing for size, unless forced " 2798 "to vectorize."); 2799 ORE->emit([&]() { 2800 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 2801 L->getStartLoc(), L->getHeader()) 2802 << "Code-size may be reduced by not forcing " 2803 "vectorization, or by source-code modifications " 2804 "eliminating the need for runtime checks " 2805 "(e.g., adding 'restrict')."; 2806 }); 2807 } 2808 2809 MemCheckBlock->setName("vector.memcheck"); 2810 // Create new preheader for vector loop. 
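  // The split below keeps the expanded checks in vector.memcheck and creates
  // a fresh vector.ph successor; the unconditional branch between them is then
  // rewritten into a conditional branch that bypasses the vector loop whenever
  // the runtime check reports a possible overlap.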
2811 LoopVectorPreHeader = 2812 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr, 2813 "vector.ph"); 2814 2815 // Update dominator only if this is first RT check. 2816 if (LoopBypassBlocks.empty()) { 2817 DT->changeImmediateDominator(Bypass, MemCheckBlock); 2818 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock); 2819 } 2820 2821 ReplaceInstWithInst( 2822 MemCheckBlock->getTerminator(), 2823 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck)); 2824 LoopBypassBlocks.push_back(MemCheckBlock); 2825 AddedSafetyChecks = true; 2826 2827 // We currently don't use LoopVersioning for the actual loop cloning but we 2828 // still use it to add the noalias metadata. 2829 LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2830 PSE.getSE()); 2831 LVer->prepareNoAliasMetadata(); 2832 } 2833 2834 Value *InnerLoopVectorizer::emitTransformedIndex( 2835 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2836 const InductionDescriptor &ID) const { 2837 2838 SCEVExpander Exp(*SE, DL, "induction"); 2839 auto Step = ID.getStep(); 2840 auto StartValue = ID.getStartValue(); 2841 assert(Index->getType() == Step->getType() && 2842 "Index type does not match StepValue type"); 2843 2844 // Note: the IR at this point is broken. We cannot use SE to create any new 2845 // SCEV and then expand it, hoping that SCEV's simplification will give us 2846 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2847 // lead to various SCEV crashes. So all we can do is to use builder and rely 2848 // on InstCombine for future simplifications. Here we handle some trivial 2849 // cases only. 2850 auto CreateAdd = [&B](Value *X, Value *Y) { 2851 assert(X->getType() == Y->getType() && "Types don't match!"); 2852 if (auto *CX = dyn_cast<ConstantInt>(X)) 2853 if (CX->isZero()) 2854 return Y; 2855 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2856 if (CY->isZero()) 2857 return X; 2858 return B.CreateAdd(X, Y); 2859 }; 2860 2861 auto CreateMul = [&B](Value *X, Value *Y) { 2862 assert(X->getType() == Y->getType() && "Types don't match!"); 2863 if (auto *CX = dyn_cast<ConstantInt>(X)) 2864 if (CX->isOne()) 2865 return Y; 2866 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2867 if (CY->isOne()) 2868 return X; 2869 return B.CreateMul(X, Y); 2870 }; 2871 2872 switch (ID.getKind()) { 2873 case InductionDescriptor::IK_IntInduction: { 2874 assert(Index->getType() == StartValue->getType() && 2875 "Index type does not match StartValue type"); 2876 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2877 return B.CreateSub(StartValue, Index); 2878 auto *Offset = CreateMul( 2879 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2880 return CreateAdd(StartValue, Offset); 2881 } 2882 case InductionDescriptor::IK_PtrInduction: { 2883 assert(isa<SCEVConstant>(Step) && 2884 "Expected constant step for pointer induction"); 2885 return B.CreateGEP( 2886 StartValue->getType()->getPointerElementType(), StartValue, 2887 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2888 &*B.GetInsertPoint()))); 2889 } 2890 case InductionDescriptor::IK_FpInduction: { 2891 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2892 auto InductionBinOp = ID.getInductionBinOp(); 2893 assert(InductionBinOp && 2894 (InductionBinOp->getOpcode() == Instruction::FAdd || 2895 InductionBinOp->getOpcode() == Instruction::FSub) && 2896 "Original bin op should be defined for FP induction"); 2897 2898 Value 
*StepValue = cast<SCEVUnknown>(Step)->getValue(); 2899 2900 // Floating point operations had to be 'fast' to enable the induction. 2901 FastMathFlags Flags; 2902 Flags.setFast(); 2903 2904 Value *MulExp = B.CreateFMul(StepValue, Index); 2905 if (isa<Instruction>(MulExp)) 2906 // We have to check, the MulExp may be a constant. 2907 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2908 2909 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2910 "induction"); 2911 if (isa<Instruction>(BOp)) 2912 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2913 2914 return BOp; 2915 } 2916 case InductionDescriptor::IK_NoInduction: 2917 return nullptr; 2918 } 2919 llvm_unreachable("invalid enum"); 2920 } 2921 2922 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2923 /* 2924 In this function we generate a new loop. The new loop will contain 2925 the vectorized instructions while the old loop will continue to run the 2926 scalar remainder. 2927 2928 [ ] <-- loop iteration number check. 2929 / | 2930 / v 2931 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2932 | / | 2933 | / v 2934 || [ ] <-- vector pre header. 2935 |/ | 2936 | v 2937 | [ ] \ 2938 | [ ]_| <-- vector loop. 2939 | | 2940 | v 2941 | -[ ] <--- middle-block. 2942 | / | 2943 | / v 2944 -|- >[ ] <--- new preheader. 2945 | | 2946 | v 2947 | [ ] \ 2948 | [ ]_| <-- old scalar loop to handle remainder. 2949 \ | 2950 \ v 2951 >[ ] <-- exit block. 2952 ... 2953 */ 2954 2955 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2956 2957 // Some loops have a single integer induction variable, while other loops 2958 // don't. One example is c++ iterators that often have multiple pointer 2959 // induction variables. In the code below we also support a case where we 2960 // don't have a single induction variable. 2961 // 2962 // We try to obtain an induction variable from the original loop as hard 2963 // as possible. However if we don't find one that: 2964 // - is an integer 2965 // - counts from zero, stepping by one 2966 // - is the size of the widest induction variable type 2967 // then we create a new one. 2968 OldInduction = Legal->getPrimaryInduction(); 2969 Type *IdxTy = Legal->getWidestInductionType(); 2970 2971 // Split the single block loop into the two loop structure described above. 2972 LoopScalarBody = OrigLoop->getHeader(); 2973 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 2974 LoopExitBlock = OrigLoop->getExitBlock(); 2975 assert(LoopExitBlock && "Must have an exit block"); 2976 assert(LoopVectorPreHeader && "Invalid loop structure"); 2977 2978 LoopMiddleBlock = 2979 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 2980 LI, nullptr, "middle.block"); 2981 LoopScalarPreHeader = 2982 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 2983 nullptr, "scalar.ph"); 2984 // We intentionally don't let SplitBlock to update LoopInfo since 2985 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 2986 // LoopVectorBody is explicitly added to the correct place few lines later. 2987 LoopVectorBody = 2988 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 2989 nullptr, nullptr, "vector.body"); 2990 2991 // Update dominator for loop exit. 2992 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 2993 2994 // Create and register the new vector loop. 
2995 Loop *Lp = LI->AllocateLoop(); 2996 Loop *ParentLoop = OrigLoop->getParentLoop(); 2997 2998 // Insert the new loop into the loop nest and register the new basic blocks 2999 // before calling any utilities such as SCEV that require valid LoopInfo. 3000 if (ParentLoop) { 3001 ParentLoop->addChildLoop(Lp); 3002 } else { 3003 LI->addTopLevelLoop(Lp); 3004 } 3005 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3006 3007 // Find the loop boundaries. 3008 Value *Count = getOrCreateTripCount(Lp); 3009 3010 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3011 3012 // Now, compare the new count to zero. If it is zero skip the vector loop and 3013 // jump to the scalar loop. This check also covers the case where the 3014 // backedge-taken count is uint##_max: adding one to it will overflow leading 3015 // to an incorrect trip count of zero. In this (rare) case we will also jump 3016 // to the scalar loop. 3017 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3018 3019 // Generate the code to check any assumptions that we've made for SCEV 3020 // expressions. 3021 emitSCEVChecks(Lp, LoopScalarPreHeader); 3022 3023 // Generate the code that checks in runtime if arrays overlap. We put the 3024 // checks into a separate block to make the more common case of few elements 3025 // faster. 3026 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3027 3028 // Generate the induction variable. 3029 // The loop step is equal to the vectorization factor (num of SIMD elements) 3030 // times the unroll factor (num of SIMD instructions). 3031 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3032 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3033 Induction = 3034 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3035 getDebugLocFromInstOrOperands(OldInduction)); 3036 3037 // We are going to resume the execution of the scalar loop. 3038 // Go over all of the induction variables that we found and fix the 3039 // PHIs that are left in the scalar version of the loop. 3040 // The starting values of PHI nodes depend on the counter of the last 3041 // iteration in the vectorized loop. 3042 // If we come from a bypass edge then we need to start from the original 3043 // start value. 3044 3045 // This variable saves the new starting index for the scalar loop. It is used 3046 // to test if there are any tail iterations left once the vector loop has 3047 // completed. 3048 for (auto &InductionEntry : Legal->getInductionVars()) { 3049 PHINode *OrigPhi = InductionEntry.first; 3050 InductionDescriptor II = InductionEntry.second; 3051 3052 // Create phi nodes to merge from the backedge-taken check block. 3053 PHINode *BCResumeVal = 3054 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3055 LoopScalarPreHeader->getTerminator()); 3056 // Copy original phi DL over to the new one. 3057 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3058 Value *&EndValue = IVEndValues[OrigPhi]; 3059 if (OrigPhi == OldInduction) { 3060 // We know what the end value is. 
3061 EndValue = CountRoundDown; 3062 } else { 3063 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 3064 Type *StepType = II.getStep()->getType(); 3065 Instruction::CastOps CastOp = 3066 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3067 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3068 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3069 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3070 EndValue->setName("ind.end"); 3071 } 3072 3073 // The new PHI merges the original incoming value, in case of a bypass, 3074 // or the value at the end of the vectorized loop. 3075 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3076 3077 // Fix the scalar body counter (PHI node). 3078 // The old induction's phi node in the scalar body needs the truncated 3079 // value. 3080 for (BasicBlock *BB : LoopBypassBlocks) 3081 BCResumeVal->addIncoming(II.getStartValue(), BB); 3082 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3083 } 3084 3085 // We need the OrigLoop (scalar loop part) latch terminator to help 3086 // produce correct debug info for the middle block BB instructions. 3087 // The legality check stage guarantees that the loop will have a single 3088 // latch. 3089 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) && 3090 "Scalar loop latch terminator isn't a branch"); 3091 BranchInst *ScalarLatchBr = 3092 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()); 3093 3094 // Add a check in the middle block to see if we have completed 3095 // all of the iterations in the first vector loop. 3096 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3097 // If tail is to be folded, we know we don't need to run the remainder. 3098 Value *CmpN = Builder.getTrue(); 3099 if (!Cost->foldTailByMasking()) { 3100 CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3101 CountRoundDown, "cmp.n", 3102 LoopMiddleBlock->getTerminator()); 3103 3104 // Here we use the same DebugLoc as the scalar loop latch branch instead 3105 // of the corresponding compare because they may have ended up with 3106 // different line numbers and we want to avoid awkward line stepping while 3107 // debugging. Eg. if the compare has got a line number inside the loop. 3108 cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc()); 3109 } 3110 3111 BranchInst *BrInst = 3112 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN); 3113 BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc()); 3114 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3115 3116 // Get ready to start creating new instructions into the vectorized body. 3117 assert(LoopVectorPreHeader == Lp->getLoopPreheader() && 3118 "Inconsistent vector loop preheader"); 3119 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3120 3121 Optional<MDNode *> VectorizedLoopID = 3122 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3123 LLVMLoopVectorizeFollowupVectorized}); 3124 if (VectorizedLoopID.hasValue()) { 3125 Lp->setLoopID(VectorizedLoopID.getValue()); 3126 3127 // Do not setAlreadyVectorized if loop attributes have been defined 3128 // explicitly. 3129 return LoopVectorPreHeader; 3130 } 3131 3132 // Keep all loop hints from the original loop on the vector loop (we'll 3133 // replace the vectorizer-specific hints below). 
3134 if (MDNode *LID = OrigLoop->getLoopID()) 3135 Lp->setLoopID(LID); 3136 3137 LoopVectorizeHints Hints(Lp, true, *ORE); 3138 Hints.setAlreadyVectorized(); 3139 3140 #ifdef EXPENSIVE_CHECKS 3141 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3142 LI->verify(*DT); 3143 #endif 3144 3145 return LoopVectorPreHeader; 3146 } 3147 3148 // Fix up external users of the induction variable. At this point, we are 3149 // in LCSSA form, with all external PHIs that use the IV having one input value, 3150 // coming from the remainder loop. We need those PHIs to also have a correct 3151 // value for the IV when arriving directly from the middle block. 3152 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3153 const InductionDescriptor &II, 3154 Value *CountRoundDown, Value *EndValue, 3155 BasicBlock *MiddleBlock) { 3156 // There are two kinds of external IV usages - those that use the value 3157 // computed in the last iteration (the PHI) and those that use the penultimate 3158 // value (the value that feeds into the phi from the loop latch). 3159 // We allow both, but they, obviously, have different values. 3160 3161 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 3162 3163 DenseMap<Value *, Value *> MissingVals; 3164 3165 // An external user of the last iteration's value should see the value that 3166 // the remainder loop uses to initialize its own IV. 3167 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3168 for (User *U : PostInc->users()) { 3169 Instruction *UI = cast<Instruction>(U); 3170 if (!OrigLoop->contains(UI)) { 3171 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3172 MissingVals[UI] = EndValue; 3173 } 3174 } 3175 3176 // An external user of the penultimate value need to see EndValue - Step. 3177 // The simplest way to get this is to recompute it from the constituent SCEVs, 3178 // that is Start + (Step * (CRD - 1)). 3179 for (User *U : OrigPhi->users()) { 3180 auto *UI = cast<Instruction>(U); 3181 if (!OrigLoop->contains(UI)) { 3182 const DataLayout &DL = 3183 OrigLoop->getHeader()->getModule()->getDataLayout(); 3184 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3185 3186 IRBuilder<> B(MiddleBlock->getTerminator()); 3187 Value *CountMinusOne = B.CreateSub( 3188 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3189 Value *CMO = 3190 !II.getStep()->getType()->isIntegerTy() 3191 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3192 II.getStep()->getType()) 3193 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3194 CMO->setName("cast.cmo"); 3195 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3196 Escape->setName("ind.escape"); 3197 MissingVals[UI] = Escape; 3198 } 3199 } 3200 3201 for (auto &I : MissingVals) { 3202 PHINode *PHI = cast<PHINode>(I.first); 3203 // One corner case we have to handle is two IVs "chasing" each-other, 3204 // that is %IV2 = phi [...], [ %IV1, %latch ] 3205 // In this case, if IV1 has an external use, we need to avoid adding both 3206 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3207 // don't already have an incoming value for the middle block. 
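    // (fixupIVUsers is invoked once per induction variable, so this exit PHI
    // may already have received its middle-block value while processing the
    // other IV in such a chain.)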
3208 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3209 PHI->addIncoming(I.second, MiddleBlock); 3210 } 3211 } 3212 3213 namespace { 3214 3215 struct CSEDenseMapInfo { 3216 static bool canHandle(const Instruction *I) { 3217 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3218 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3219 } 3220 3221 static inline Instruction *getEmptyKey() { 3222 return DenseMapInfo<Instruction *>::getEmptyKey(); 3223 } 3224 3225 static inline Instruction *getTombstoneKey() { 3226 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3227 } 3228 3229 static unsigned getHashValue(const Instruction *I) { 3230 assert(canHandle(I) && "Unknown instruction!"); 3231 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3232 I->value_op_end())); 3233 } 3234 3235 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3236 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3237 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3238 return LHS == RHS; 3239 return LHS->isIdenticalTo(RHS); 3240 } 3241 }; 3242 3243 } // end anonymous namespace 3244 3245 ///Perform cse of induction variable instructions. 3246 static void cse(BasicBlock *BB) { 3247 // Perform simple cse. 3248 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3249 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3250 Instruction *In = &*I++; 3251 3252 if (!CSEDenseMapInfo::canHandle(In)) 3253 continue; 3254 3255 // Check if we can replace this instruction with any of the 3256 // visited instructions. 3257 if (Instruction *V = CSEMap.lookup(In)) { 3258 In->replaceAllUsesWith(V); 3259 In->eraseFromParent(); 3260 continue; 3261 } 3262 3263 CSEMap[In] = In; 3264 } 3265 } 3266 3267 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, 3268 unsigned VF, 3269 bool &NeedToScalarize) { 3270 Function *F = CI->getCalledFunction(); 3271 Type *ScalarRetTy = CI->getType(); 3272 SmallVector<Type *, 4> Tys, ScalarTys; 3273 for (auto &ArgOp : CI->arg_operands()) 3274 ScalarTys.push_back(ArgOp->getType()); 3275 3276 // Estimate cost of scalarized vector call. The source operands are assumed 3277 // to be vectors, so we need to extract individual elements from there, 3278 // execute VF scalar calls, and then gather the result into the vector return 3279 // value. 3280 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3281 if (VF == 1) 3282 return ScalarCallCost; 3283 3284 // Compute corresponding vector type for return value and arguments. 3285 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3286 for (Type *ScalarTy : ScalarTys) 3287 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3288 3289 // Compute costs of unpacking argument values for the scalar calls and 3290 // packing the return values to a vector. 3291 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF); 3292 3293 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3294 3295 // If we can't emit a vector call for this function, then the currently found 3296 // cost is the cost we need to return. 3297 NeedToScalarize = true; 3298 VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/); 3299 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3300 3301 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3302 return Cost; 3303 3304 // If the corresponding vector cost is cheaper, return its cost. 
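  // For illustration (arbitrary costs): with ScalarCallCost == 10, VF == 4 and
  // ScalarizationCost == 12, the scalarized cost computed above is
  // 10 * 4 + 12 == 52; if the target provides a vector variant costing 20, the
  // code below returns 20 and clears NeedToScalarize.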
3305 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3306 if (VectorCallCost < Cost) { 3307 NeedToScalarize = false; 3308 return VectorCallCost; 3309 } 3310 return Cost; 3311 } 3312 3313 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3314 unsigned VF) { 3315 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3316 assert(ID && "Expected intrinsic call!"); 3317 3318 FastMathFlags FMF; 3319 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3320 FMF = FPMO->getFastMathFlags(); 3321 3322 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3323 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF, CI); 3324 } 3325 3326 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3327 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3328 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3329 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3330 } 3331 3332 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3333 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3334 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3335 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3336 } 3337 3338 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3339 // For every instruction `I` in MinBWs, truncate the operands, create a 3340 // truncated version of `I` and reextend its result. InstCombine runs 3341 // later and will remove any ext/trunc pairs. 3342 SmallPtrSet<Value *, 4> Erased; 3343 for (const auto &KV : Cost->getMinimalBitwidths()) { 3344 // If the value wasn't vectorized, we must maintain the original scalar 3345 // type. The absence of the value from VectorLoopValueMap indicates that it 3346 // wasn't vectorized. 3347 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3348 continue; 3349 for (unsigned Part = 0; Part < UF; ++Part) { 3350 Value *I = getOrCreateVectorValue(KV.first, Part); 3351 if (Erased.find(I) != Erased.end() || I->use_empty() || 3352 !isa<Instruction>(I)) 3353 continue; 3354 Type *OriginalTy = I->getType(); 3355 Type *ScalarTruncatedTy = 3356 IntegerType::get(OriginalTy->getContext(), KV.second); 3357 Type *TruncatedTy = VectorType::get( 3358 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements()); 3359 if (TruncatedTy == OriginalTy) 3360 continue; 3361 3362 IRBuilder<> B(cast<Instruction>(I)); 3363 auto ShrinkOperand = [&](Value *V) -> Value * { 3364 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3365 if (ZI->getSrcTy() == TruncatedTy) 3366 return ZI->getOperand(0); 3367 return B.CreateZExtOrTrunc(V, TruncatedTy); 3368 }; 3369 3370 // The actual instruction modification depends on the instruction type, 3371 // unfortunately. 3372 Value *NewI = nullptr; 3373 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3374 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3375 ShrinkOperand(BO->getOperand(1))); 3376 3377 // Any wrapping introduced by shrinking this operation shouldn't be 3378 // considered undefined behavior. So, we can't unconditionally copy 3379 // arithmetic wrapping flags to NewI. 
3380 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3381 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3382 NewI = 3383 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3384 ShrinkOperand(CI->getOperand(1))); 3385 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3386 NewI = B.CreateSelect(SI->getCondition(), 3387 ShrinkOperand(SI->getTrueValue()), 3388 ShrinkOperand(SI->getFalseValue())); 3389 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3390 switch (CI->getOpcode()) { 3391 default: 3392 llvm_unreachable("Unhandled cast!"); 3393 case Instruction::Trunc: 3394 NewI = ShrinkOperand(CI->getOperand(0)); 3395 break; 3396 case Instruction::SExt: 3397 NewI = B.CreateSExtOrTrunc( 3398 CI->getOperand(0), 3399 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3400 break; 3401 case Instruction::ZExt: 3402 NewI = B.CreateZExtOrTrunc( 3403 CI->getOperand(0), 3404 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3405 break; 3406 } 3407 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3408 auto Elements0 = 3409 cast<VectorType>(SI->getOperand(0)->getType())->getNumElements(); 3410 auto *O0 = B.CreateZExtOrTrunc( 3411 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3412 auto Elements1 = 3413 cast<VectorType>(SI->getOperand(1)->getType())->getNumElements(); 3414 auto *O1 = B.CreateZExtOrTrunc( 3415 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3416 3417 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3418 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3419 // Don't do anything with the operands, just extend the result. 3420 continue; 3421 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3422 auto Elements = 3423 cast<VectorType>(IE->getOperand(0)->getType())->getNumElements(); 3424 auto *O0 = B.CreateZExtOrTrunc( 3425 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3426 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3427 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3428 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3429 auto Elements = 3430 cast<VectorType>(EE->getOperand(0)->getType())->getNumElements(); 3431 auto *O0 = B.CreateZExtOrTrunc( 3432 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3433 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3434 } else { 3435 // If we don't know what to do, be conservative and don't do anything. 3436 continue; 3437 } 3438 3439 // Lastly, extend the result. 3440 NewI->takeName(cast<Instruction>(I)); 3441 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3442 I->replaceAllUsesWith(Res); 3443 cast<Instruction>(I)->eraseFromParent(); 3444 Erased.insert(I); 3445 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3446 } 3447 } 3448 3449 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3450 for (const auto &KV : Cost->getMinimalBitwidths()) { 3451 // If the value wasn't vectorized, we must maintain the original scalar 3452 // type. The absence of the value from VectorLoopValueMap indicates that it 3453 // wasn't vectorized. 
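    // For example, a zext back to the original type created by the shrinking
    // loop above may be left with no users if all of its users were themselves
    // shrunk; the code below erases such a zext and points the value map back
    // at its narrow operand.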
3454 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3455 continue; 3456 for (unsigned Part = 0; Part < UF; ++Part) { 3457 Value *I = getOrCreateVectorValue(KV.first, Part); 3458 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3459 if (Inst && Inst->use_empty()) { 3460 Value *NewI = Inst->getOperand(0); 3461 Inst->eraseFromParent(); 3462 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3463 } 3464 } 3465 } 3466 } 3467 3468 void InnerLoopVectorizer::fixVectorizedLoop() { 3469 // Insert truncates and extends for any truncated instructions as hints to 3470 // InstCombine. 3471 if (VF > 1) 3472 truncateToMinimalBitwidths(); 3473 3474 // Fix widened non-induction PHIs by setting up the PHI operands. 3475 if (OrigPHIsToFix.size()) { 3476 assert(EnableVPlanNativePath && 3477 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3478 fixNonInductionPHIs(); 3479 } 3480 3481 // At this point every instruction in the original loop is widened to a 3482 // vector form. Now we need to fix the recurrences in the loop. These PHI 3483 // nodes are currently empty because we did not want to introduce cycles. 3484 // This is the second stage of vectorizing recurrences. 3485 fixCrossIterationPHIs(); 3486 3487 // Forget the original basic block. 3488 PSE.getSE()->forgetLoop(OrigLoop); 3489 3490 // Fix-up external users of the induction variables. 3491 for (auto &Entry : Legal->getInductionVars()) 3492 fixupIVUsers(Entry.first, Entry.second, 3493 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3494 IVEndValues[Entry.first], LoopMiddleBlock); 3495 3496 fixLCSSAPHIs(); 3497 for (Instruction *PI : PredicatedInstructions) 3498 sinkScalarOperands(&*PI); 3499 3500 // Remove redundant induction instructions. 3501 cse(LoopVectorBody); 3502 3503 // Set/update profile weights for the vector and remainder loops as original 3504 // loop iterations are now distributed among them. Note that original loop 3505 // represented by LoopScalarBody becomes remainder loop after vectorization. 3506 // 3507 // For cases like foldTailByMasking() and requiresScalarEpiloque() we may 3508 // end up getting slightly roughened result but that should be OK since 3509 // profile is not inherently precise anyway. Note also possible bypass of 3510 // vector code caused by legality checks is ignored, assigning all the weight 3511 // to the vector loop, optimistically. 3512 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), 3513 LI->getLoopFor(LoopVectorBody), 3514 LI->getLoopFor(LoopScalarBody), VF * UF); 3515 } 3516 3517 void InnerLoopVectorizer::fixCrossIterationPHIs() { 3518 // In order to support recurrences we need to be able to vectorize Phi nodes. 3519 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3520 // stage #2: We now need to fix the recurrences by adding incoming edges to 3521 // the currently empty PHI nodes. At this point every instruction in the 3522 // original loop is widened to a vector form so we can use them to construct 3523 // the incoming edges. 3524 for (PHINode &Phi : OrigLoop->getHeader()->phis()) { 3525 // Handle first-order recurrences and reductions that need to be fixed. 3526 if (Legal->isFirstOrderRecurrence(&Phi)) 3527 fixFirstOrderRecurrence(&Phi); 3528 else if (Legal->isReductionVariable(&Phi)) 3529 fixReduction(&Phi); 3530 } 3531 } 3532 3533 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) { 3534 // This is the second phase of vectorizing first-order recurrences. 
An 3535 // overview of the transformation is described below. Suppose we have the 3536 // following loop. 3537 // 3538 // for (int i = 0; i < n; ++i) 3539 // b[i] = a[i] - a[i - 1]; 3540 // 3541 // There is a first-order recurrence on "a". For this loop, the shorthand 3542 // scalar IR looks like: 3543 // 3544 // scalar.ph: 3545 // s_init = a[-1] 3546 // br scalar.body 3547 // 3548 // scalar.body: 3549 // i = phi [0, scalar.ph], [i+1, scalar.body] 3550 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3551 // s2 = a[i] 3552 // b[i] = s2 - s1 3553 // br cond, scalar.body, ... 3554 // 3555 // In this example, s1 is a recurrence because it's value depends on the 3556 // previous iteration. In the first phase of vectorization, we created a 3557 // temporary value for s1. We now complete the vectorization and produce the 3558 // shorthand vector IR shown below (for VF = 4, UF = 1). 3559 // 3560 // vector.ph: 3561 // v_init = vector(..., ..., ..., a[-1]) 3562 // br vector.body 3563 // 3564 // vector.body 3565 // i = phi [0, vector.ph], [i+4, vector.body] 3566 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3567 // v2 = a[i, i+1, i+2, i+3]; 3568 // v3 = vector(v1(3), v2(0, 1, 2)) 3569 // b[i, i+1, i+2, i+3] = v2 - v3 3570 // br cond, vector.body, middle.block 3571 // 3572 // middle.block: 3573 // x = v2(3) 3574 // br scalar.ph 3575 // 3576 // scalar.ph: 3577 // s_init = phi [x, middle.block], [a[-1], otherwise] 3578 // br scalar.body 3579 // 3580 // After execution completes the vector loop, we extract the next value of 3581 // the recurrence (x) to use as the initial value in the scalar loop. 3582 3583 // Get the original loop preheader and single loop latch. 3584 auto *Preheader = OrigLoop->getLoopPreheader(); 3585 auto *Latch = OrigLoop->getLoopLatch(); 3586 3587 // Get the initial and previous values of the scalar recurrence. 3588 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader); 3589 auto *Previous = Phi->getIncomingValueForBlock(Latch); 3590 3591 // Create a vector from the initial value. 3592 auto *VectorInit = ScalarInit; 3593 if (VF > 1) { 3594 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3595 VectorInit = Builder.CreateInsertElement( 3596 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 3597 Builder.getInt32(VF - 1), "vector.recur.init"); 3598 } 3599 3600 // We constructed a temporary phi node in the first phase of vectorization. 3601 // This phi node will eventually be deleted. 3602 Builder.SetInsertPoint( 3603 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 3604 3605 // Create a phi node for the new recurrence. The current value will either be 3606 // the initial value inserted into a vector or loop-varying vector value. 3607 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3608 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3609 3610 // Get the vectorized previous value of the last part UF - 1. It appears last 3611 // among all unrolled iterations, due to the order of their construction. 3612 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 3613 3614 // Find and set the insertion point after the previous value if it is an 3615 // instruction. 3616 BasicBlock::iterator InsertPt; 3617 // Note that the previous value may have been constant-folded so it is not 3618 // guaranteed to be an instruction in the vector loop. 3619 // FIXME: Loop invariant values do not form recurrences. We should deal with 3620 // them earlier. 
3621 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 3622 InsertPt = LoopVectorBody->getFirstInsertionPt(); 3623 else { 3624 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 3625 if (isa<PHINode>(PreviousLastPart)) 3626 // If the previous value is a phi node, we should insert after all the phi 3627 // nodes in the block containing the PHI to avoid breaking basic block 3628 // verification. Note that the basic block may be different to 3629 // LoopVectorBody, in case we predicate the loop. 3630 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 3631 else 3632 InsertPt = ++PreviousInst->getIterator(); 3633 } 3634 Builder.SetInsertPoint(&*InsertPt); 3635 3636 // We will construct a vector for the recurrence by combining the values for 3637 // the current and previous iterations. This is the required shuffle mask. 3638 SmallVector<int, 8> ShuffleMask(VF); 3639 ShuffleMask[0] = VF - 1; 3640 for (unsigned I = 1; I < VF; ++I) 3641 ShuffleMask[I] = I + VF - 1; 3642 3643 // The vector from which to take the initial value for the current iteration 3644 // (actual or unrolled). Initially, this is the vector phi node. 3645 Value *Incoming = VecPhi; 3646 3647 // Shuffle the current and previous vector and update the vector parts. 3648 for (unsigned Part = 0; Part < UF; ++Part) { 3649 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3650 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3651 auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3652 ShuffleMask) 3653 : Incoming; 3654 PhiPart->replaceAllUsesWith(Shuffle); 3655 cast<Instruction>(PhiPart)->eraseFromParent(); 3656 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3657 Incoming = PreviousPart; 3658 } 3659 3660 // Fix the latch value of the new recurrence in the vector loop. 3661 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3662 3663 // Extract the last vector element in the middle block. This will be the 3664 // initial value for the recurrence when jumping to the scalar loop. 3665 auto *ExtractForScalar = Incoming; 3666 if (VF > 1) { 3667 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3668 ExtractForScalar = Builder.CreateExtractElement( 3669 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 3670 } 3671 // Extract the second last element in the middle block if the 3672 // Phi is used outside the loop. We need to extract the phi itself 3673 // and not the last element (the phi update in the current iteration). This 3674 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3675 // when the scalar loop is not run at all. 3676 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3677 if (VF > 1) 3678 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3679 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 3680 // When loop is unrolled without vectorizing, initialize 3681 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 3682 // `Incoming`. This is analogous to the vectorized case above: extracting the 3683 // second last element when VF > 1. 3684 else if (UF > 1) 3685 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 3686 3687 // Fix the initial value of the original recurrence in the scalar loop. 
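  // In the shorthand example above this is the s_init phi in scalar.ph: when
  // arriving from the middle block we resume from the element extracted there,
  // and when arriving from a bypass block we use the original initial value.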
3688 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3689 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3690 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3691 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3692 Start->addIncoming(Incoming, BB); 3693 } 3694 3695 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 3696 Phi->setName("scalar.recur"); 3697 3698 // Finally, fix users of the recurrence outside the loop. The users will need 3699 // either the last value of the scalar recurrence or the last value of the 3700 // vector recurrence we extracted in the middle block. Since the loop is in 3701 // LCSSA form, we just need to find all the phi nodes for the original scalar 3702 // recurrence in the exit block, and then add an edge for the middle block. 3703 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3704 if (LCSSAPhi.getIncomingValue(0) == Phi) { 3705 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 3706 } 3707 } 3708 } 3709 3710 void InnerLoopVectorizer::fixReduction(PHINode *Phi) { 3711 Constant *Zero = Builder.getInt32(0); 3712 3713 // Get its reduction variable descriptor. 3714 assert(Legal->isReductionVariable(Phi) && 3715 "Unable to find the reduction variable"); 3716 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi]; 3717 3718 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind(); 3719 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 3720 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 3721 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind = 3722 RdxDesc.getMinMaxRecurrenceKind(); 3723 setDebugLocFromInst(Builder, ReductionStartValue); 3724 3725 // We need to generate a reduction vector from the incoming scalar. 3726 // To do so, we need to generate the 'identity' vector and override 3727 // one of the elements with the incoming scalar reduction. We need 3728 // to do it in the vector-loop preheader. 3729 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3730 3731 // This is the vector-clone of the value that leaves the loop. 3732 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType(); 3733 3734 // Find the reduction identity variable. Zero for addition, or, xor, 3735 // one for multiplication, -1 for And. 3736 Value *Identity; 3737 Value *VectorStart; 3738 if (RK == RecurrenceDescriptor::RK_IntegerMinMax || 3739 RK == RecurrenceDescriptor::RK_FloatMinMax) { 3740 // MinMax reductions have the start value as their identity. 3741 if (VF == 1) { 3742 VectorStart = Identity = ReductionStartValue; 3743 } else { 3744 VectorStart = Identity = 3745 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident"); 3746 } 3747 } else { 3748 // Handle other reduction kinds: 3749 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 3750 RK, VecTy->getScalarType()); 3751 if (VF == 1) { 3752 Identity = Iden; 3753 // This vector is the Identity vector where the first element is the 3754 // incoming scalar reduction. 3755 VectorStart = ReductionStartValue; 3756 } else { 3757 Identity = ConstantVector::getSplat({VF, false}, Iden); 3758 3759 // This vector is the Identity vector where the first element is the 3760 // incoming scalar reduction. 3761 VectorStart = 3762 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero); 3763 } 3764 } 3765 3766 // Wrap flags are in general invalid after vectorization, clear them. 3767 clearReductionWrapFlags(RdxDesc); 3768 3769 // Fix the vector-loop phi.
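  // Each unrolled copy of the reduction phi receives two incoming values
  // below: the start value (part 0) or the identity (remaining parts) from the
  // vector preheader, and the vectorized loop value from the vector latch.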
3770 3771 // Reductions do not have to start at zero. They can start with 3772 // any loop invariant values. 3773 BasicBlock *Latch = OrigLoop->getLoopLatch(); 3774 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 3775 3776 for (unsigned Part = 0; Part < UF; ++Part) { 3777 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part); 3778 Value *Val = getOrCreateVectorValue(LoopVal, Part); 3779 // Make sure to add the reduction start value only to the 3780 // first unroll part. 3781 Value *StartVal = (Part == 0) ? VectorStart : Identity; 3782 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader); 3783 cast<PHINode>(VecRdxPhi) 3784 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3785 } 3786 3787 // Before each round, move the insertion point right between 3788 // the PHIs and the values we are going to write. 3789 // This allows us to write both PHINodes and the extractelement 3790 // instructions. 3791 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3792 3793 setDebugLocFromInst(Builder, LoopExitInst); 3794 3795 // If tail is folded by masking, the vector value to leave the loop should be 3796 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 3797 // instead of the former. 3798 if (Cost->foldTailByMasking()) { 3799 for (unsigned Part = 0; Part < UF; ++Part) { 3800 Value *VecLoopExitInst = 3801 VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3802 Value *Sel = nullptr; 3803 for (User *U : VecLoopExitInst->users()) { 3804 if (isa<SelectInst>(U)) { 3805 assert(!Sel && "Reduction exit feeding two selects"); 3806 Sel = U; 3807 } else 3808 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3809 } 3810 assert(Sel && "Reduction exit feeds no select"); 3811 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel); 3812 } 3813 } 3814 3815 // If the vector reduction can be performed in a smaller type, we truncate 3816 // then extend the loop exit value to enable InstCombine to evaluate the 3817 // entire expression in the smaller type. 3818 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3819 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3820 Builder.SetInsertPoint( 3821 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3822 VectorParts RdxParts(UF); 3823 for (unsigned Part = 0; Part < UF; ++Part) { 3824 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3825 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3826 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3827 : Builder.CreateZExt(Trunc, VecTy); 3828 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3829 UI != RdxParts[Part]->user_end();) 3830 if (*UI != Trunc) { 3831 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3832 RdxParts[Part] = Extnd; 3833 } else { 3834 ++UI; 3835 } 3836 } 3837 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3838 for (unsigned Part = 0; Part < UF; ++Part) { 3839 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3840 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3841 } 3842 } 3843 3844 // Reduce all of the unrolled parts into a single vector. 3845 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3846 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3847 3848 // The middle block terminator has already been assigned a DebugLoc here (the 3849 // OrigLoop's single latch terminator). 
We want the whole middle block to 3850 // appear to execute on this line because: (a) it is all compiler generated, 3851 // (b) these instructions are always executed after evaluating the latch 3852 // conditional branch, and (c) other passes may add new predecessors which 3853 // terminate on this line. This is the easiest way to ensure we don't 3854 // accidentally cause an extra step back into the loop while debugging. 3855 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 3856 for (unsigned Part = 1; Part < UF; ++Part) { 3857 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3858 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3859 // Floating point operations had to be 'fast' to enable the reduction. 3860 ReducedPartRdx = addFastMathFlag( 3861 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3862 ReducedPartRdx, "bin.rdx"), 3863 RdxDesc.getFastMathFlags()); 3864 else 3865 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3866 RdxPart); 3867 } 3868 3869 if (VF > 1) { 3870 bool NoNaN = Legal->hasFunNoNaNAttr(); 3871 ReducedPartRdx = 3872 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3873 // If the reduction can be performed in a smaller type, we need to extend 3874 // the reduction to the wider type before we branch to the original loop. 3875 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3876 ReducedPartRdx = 3877 RdxDesc.isSigned() 3878 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3879 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3880 } 3881 3882 // Create a phi node that merges control-flow from the backedge-taken check 3883 // block and the middle block. 3884 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3885 LoopScalarPreHeader->getTerminator()); 3886 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3887 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3888 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3889 3890 // Now, we need to fix the users of the reduction variable 3891 // inside and outside of the scalar remainder loop. 3892 // We know that the loop is in LCSSA form. We need to update the 3893 // PHI nodes in the exit blocks. 3894 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3895 // All PHINodes need to have a single entry edge, or two if 3896 // we already fixed them. 3897 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3898 3899 // We found a reduction value exit-PHI. Update it with the 3900 // incoming bypass edge. 3901 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3902 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3903 } // end of the LCSSA phi scan. 3904 3905 // Fix the scalar loop reduction variable with the incoming reduction sum 3906 // from the vector body and from the backedge value. 3907 int IncomingEdgeBlockIdx = 3908 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3909 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3910 // Pick the other block. 3911 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 
0 : 1); 3912 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3913 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3914 } 3915 3916 void InnerLoopVectorizer::clearReductionWrapFlags( 3917 RecurrenceDescriptor &RdxDesc) { 3918 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind(); 3919 if (RK != RecurrenceDescriptor::RK_IntegerAdd && 3920 RK != RecurrenceDescriptor::RK_IntegerMult) 3921 return; 3922 3923 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 3924 assert(LoopExitInstr && "null loop exit instruction"); 3925 SmallVector<Instruction *, 8> Worklist; 3926 SmallPtrSet<Instruction *, 8> Visited; 3927 Worklist.push_back(LoopExitInstr); 3928 Visited.insert(LoopExitInstr); 3929 3930 while (!Worklist.empty()) { 3931 Instruction *Cur = Worklist.pop_back_val(); 3932 if (isa<OverflowingBinaryOperator>(Cur)) 3933 for (unsigned Part = 0; Part < UF; ++Part) { 3934 Value *V = getOrCreateVectorValue(Cur, Part); 3935 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 3936 } 3937 3938 for (User *U : Cur->users()) { 3939 Instruction *UI = cast<Instruction>(U); 3940 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 3941 Visited.insert(UI).second) 3942 Worklist.push_back(UI); 3943 } 3944 } 3945 } 3946 3947 void InnerLoopVectorizer::fixLCSSAPHIs() { 3948 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3949 if (LCSSAPhi.getNumIncomingValues() == 1) { 3950 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3951 // Non-instruction incoming values will have only one value. 3952 unsigned LastLane = 0; 3953 if (isa<Instruction>(IncomingValue)) 3954 LastLane = Cost->isUniformAfterVectorization( 3955 cast<Instruction>(IncomingValue), VF) 3956 ? 0 3957 : VF - 1; 3958 // Can be a loop invariant incoming value or the last scalar value to be 3959 // extracted from the vectorized loop. 3960 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3961 Value *lastIncomingValue = 3962 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 3963 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 3964 } 3965 } 3966 } 3967 3968 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3969 // The basic block and loop containing the predicated instruction. 3970 auto *PredBB = PredInst->getParent(); 3971 auto *VectorLoop = LI->getLoopFor(PredBB); 3972 3973 // Initialize a worklist with the operands of the predicated instruction. 3974 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3975 3976 // Holds instructions that we need to analyze again. An instruction may be 3977 // reanalyzed if we don't yet know if we can sink it or not. 3978 SmallVector<Instruction *, 8> InstsToReanalyze; 3979 3980 // Returns true if a given use occurs in the predicated block. Phi nodes use 3981 // their operands in their corresponding predecessor blocks. 3982 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 3983 auto *I = cast<Instruction>(U.getUser()); 3984 BasicBlock *BB = I->getParent(); 3985 if (auto *Phi = dyn_cast<PHINode>(I)) 3986 BB = Phi->getIncomingBlock( 3987 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 3988 return BB == PredBB; 3989 }; 3990 3991 // Iteratively sink the scalarized operands of the predicated instruction 3992 // into the block we created for it. When an instruction is sunk, it's 3993 // operands are then added to the worklist. The algorithm ends after one pass 3994 // through the worklist doesn't sink a single instruction. 
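  // For example, a scalarized address computation whose only user is the
  // predicated instruction can be moved into the predicated block; its own
  // operands then become new sinking candidates, which is why the worklist is
  // re-seeded until a whole pass makes no changes.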
3995 bool Changed; 3996 do { 3997 // Add the instructions that need to be reanalyzed to the worklist, and 3998 // reset the changed indicator. 3999 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4000 InstsToReanalyze.clear(); 4001 Changed = false; 4002 4003 while (!Worklist.empty()) { 4004 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4005 4006 // We can't sink an instruction if it is a phi node, is already in the 4007 // predicated block, is not in the loop, or may have side effects. 4008 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4009 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4010 continue; 4011 4012 // It's legal to sink the instruction if all its uses occur in the 4013 // predicated block. Otherwise, there's nothing to do yet, and we may 4014 // need to reanalyze the instruction. 4015 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4016 InstsToReanalyze.push_back(I); 4017 continue; 4018 } 4019 4020 // Move the instruction to the beginning of the predicated block, and add 4021 // it's operands to the worklist. 4022 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4023 Worklist.insert(I->op_begin(), I->op_end()); 4024 4025 // The sinking may have enabled other instructions to be sunk, so we will 4026 // need to iterate. 4027 Changed = true; 4028 } 4029 } while (Changed); 4030 } 4031 4032 void InnerLoopVectorizer::fixNonInductionPHIs() { 4033 for (PHINode *OrigPhi : OrigPHIsToFix) { 4034 PHINode *NewPhi = 4035 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 4036 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 4037 4038 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 4039 predecessors(OrigPhi->getParent())); 4040 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 4041 predecessors(NewPhi->getParent())); 4042 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 4043 "Scalar and Vector BB should have the same number of predecessors"); 4044 4045 // The insertion point in Builder may be invalidated by the time we get 4046 // here. Force the Builder insertion point to something valid so that we do 4047 // not run into issues during insertion point restore in 4048 // getOrCreateVectorValue calls below. 4049 Builder.SetInsertPoint(NewPhi); 4050 4051 // The predecessor order is preserved and we can rely on mapping between 4052 // scalar and vector block predecessors. 4053 for (unsigned i = 0; i < NumIncomingValues; ++i) { 4054 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 4055 4056 // When looking up the new scalar/vector values to fix up, use incoming 4057 // values from original phi. 4058 Value *ScIncV = 4059 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 4060 4061 // Scalar incoming value may need a broadcast 4062 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 4063 NewPhi->addIncoming(NewIncV, NewPredBB); 4064 } 4065 } 4066 } 4067 4068 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, unsigned UF, 4069 unsigned VF, bool IsPtrLoopInvariant, 4070 SmallBitVector &IsIndexLoopInvariant) { 4071 // Construct a vector GEP by widening the operands of the scalar GEP as 4072 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4073 // results in a vector of pointers when at least one operand of the GEP 4074 // is vector-typed. Thus, to keep the representation compact, we only use 4075 // vector-typed operands for loop-varying values. 
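  // For illustration (made-up names, VF == 4): a GEP with a loop-invariant
  // base pointer and one loop-varying index is widened below into
  //   %vec.gep = getelementptr i32, i32* %base, <4 x i64> %vec.idx
  // i.e. only the varying index becomes a vector, and the result is a
  // <4 x i32*> vector of pointers (marked inbounds when the original GEP was).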
4076 4077 if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4078 // If we are vectorizing, but the GEP has only loop-invariant operands, 4079 // the GEP we build (by only using vector-typed operands for 4080 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4081 // produce a vector of pointers, we need to either arbitrarily pick an 4082 // operand to broadcast, or broadcast a clone of the original GEP. 4083 // Here, we broadcast a clone of the original. 4084 // 4085 // TODO: If at some point we decide to scalarize instructions having 4086 // loop-invariant operands, this special case will no longer be 4087 // required. We would add the scalarization decision to 4088 // collectLoopScalars() and teach getVectorValue() to broadcast 4089 // the lane-zero scalar value. 4090 auto *Clone = Builder.Insert(GEP->clone()); 4091 for (unsigned Part = 0; Part < UF; ++Part) { 4092 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4093 VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart); 4094 addMetadata(EntryPart, GEP); 4095 } 4096 } else { 4097 // If the GEP has at least one loop-varying operand, we are sure to 4098 // produce a vector of pointers. But if we are only unrolling, we want 4099 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4100 // produce with the code below will be scalar (if VF == 1) or vector 4101 // (otherwise). Note that for the unroll-only case, we still maintain 4102 // values in the vector mapping with initVector, as we do for other 4103 // instructions. 4104 for (unsigned Part = 0; Part < UF; ++Part) { 4105 // The pointer operand of the new GEP. If it's loop-invariant, we 4106 // won't broadcast it. 4107 auto *Ptr = IsPtrLoopInvariant 4108 ? GEP->getPointerOperand() 4109 : getOrCreateVectorValue(GEP->getPointerOperand(), Part); 4110 4111 // Collect all the indices for the new GEP. If any index is 4112 // loop-invariant, we won't broadcast it. 4113 SmallVector<Value *, 4> Indices; 4114 for (auto Index : enumerate(GEP->indices())) { 4115 Value *User = Index.value().get(); 4116 if (IsIndexLoopInvariant[Index.index()]) 4117 Indices.push_back(User); 4118 else 4119 Indices.push_back(getOrCreateVectorValue(User, Part)); 4120 } 4121 4122 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4123 // but it should be a vector, otherwise. 4124 auto *NewGEP = 4125 GEP->isInBounds() 4126 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4127 Indices) 4128 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4129 assert((VF == 1 || NewGEP->getType()->isVectorTy()) && 4130 "NewGEP is not a pointer vector"); 4131 VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP); 4132 addMetadata(NewGEP, GEP); 4133 } 4134 } 4135 } 4136 4137 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF, 4138 unsigned VF) { 4139 PHINode *P = cast<PHINode>(PN); 4140 if (EnableVPlanNativePath) { 4141 // Currently we enter here in the VPlan-native path for non-induction 4142 // PHIs where all control flow is uniform. We simply widen these PHIs. 4143 // Create a vector phi with no operands - the vector phi operands will be 4144 // set at the end of vector code generation. 4145 Type *VecTy = 4146 (VF == 1) ? 
PN->getType() : VectorType::get(PN->getType(), VF); 4147 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4148 VectorLoopValueMap.setVectorValue(P, 0, VecPhi); 4149 OrigPHIsToFix.push_back(P); 4150 4151 return; 4152 } 4153 4154 assert(PN->getParent() == OrigLoop->getHeader() && 4155 "Non-header phis should have been handled elsewhere"); 4156 4157 // In order to support recurrences we need to be able to vectorize Phi nodes. 4158 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4159 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4160 // this value when we vectorize all of the instructions that use the PHI. 4161 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) { 4162 for (unsigned Part = 0; Part < UF; ++Part) { 4163 // This is phase one of vectorizing PHIs. 4164 Type *VecTy = 4165 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF); 4166 Value *EntryPart = PHINode::Create( 4167 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt()); 4168 VectorLoopValueMap.setVectorValue(P, Part, EntryPart); 4169 } 4170 return; 4171 } 4172 4173 setDebugLocFromInst(Builder, P); 4174 4175 // This PHINode must be an induction variable. 4176 // Make sure that we know about it. 4177 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4178 4179 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4180 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4181 4182 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4183 // which can be found from the original scalar operations. 4184 switch (II.getKind()) { 4185 case InductionDescriptor::IK_NoInduction: 4186 llvm_unreachable("Unknown induction"); 4187 case InductionDescriptor::IK_IntInduction: 4188 case InductionDescriptor::IK_FpInduction: 4189 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4190 case InductionDescriptor::IK_PtrInduction: { 4191 // Handle the pointer induction variable case. 4192 assert(P->getType()->isPointerTy() && "Unexpected type."); 4193 // This is the normalized GEP that starts counting at zero. 4194 Value *PtrInd = Induction; 4195 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4196 // Determine the number of scalars we need to generate for each unroll 4197 // iteration. If the instruction is uniform, we only need to generate the 4198 // first lane. Otherwise, we generate all VF values. 4199 unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF; 4200 // These are the scalar results. Notice that we don't generate vector GEPs 4201 // because scalar GEPs result in better code. 4202 for (unsigned Part = 0; Part < UF; ++Part) { 4203 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4204 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 4205 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4206 Value *SclrGep = 4207 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4208 SclrGep->setName("next.gep"); 4209 VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep); 4210 } 4211 } 4212 return; 4213 } 4214 } 4215 } 4216 4217 /// A helper function for checking whether an integer division-related 4218 /// instruction may divide by zero (in which case it must be predicated if 4219 /// executed conditionally in the scalar code). 4220 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 
4221 /// Non-zero divisors that are non compile-time constants will not be 4222 /// converted into multiplication, so we will still end up scalarizing 4223 /// the division, but can do so w/o predication. 4224 static bool mayDivideByZero(Instruction &I) { 4225 assert((I.getOpcode() == Instruction::UDiv || 4226 I.getOpcode() == Instruction::SDiv || 4227 I.getOpcode() == Instruction::URem || 4228 I.getOpcode() == Instruction::SRem) && 4229 "Unexpected instruction"); 4230 Value *Divisor = I.getOperand(1); 4231 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4232 return !CInt || CInt->isZero(); 4233 } 4234 4235 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User, 4236 VPTransformState &State) { 4237 switch (I.getOpcode()) { 4238 case Instruction::Call: 4239 case Instruction::Br: 4240 case Instruction::PHI: 4241 case Instruction::GetElementPtr: 4242 case Instruction::Select: 4243 llvm_unreachable("This instruction is handled by a different recipe."); 4244 case Instruction::UDiv: 4245 case Instruction::SDiv: 4246 case Instruction::SRem: 4247 case Instruction::URem: 4248 case Instruction::Add: 4249 case Instruction::FAdd: 4250 case Instruction::Sub: 4251 case Instruction::FSub: 4252 case Instruction::FNeg: 4253 case Instruction::Mul: 4254 case Instruction::FMul: 4255 case Instruction::FDiv: 4256 case Instruction::FRem: 4257 case Instruction::Shl: 4258 case Instruction::LShr: 4259 case Instruction::AShr: 4260 case Instruction::And: 4261 case Instruction::Or: 4262 case Instruction::Xor: { 4263 // Just widen unops and binops. 4264 setDebugLocFromInst(Builder, &I); 4265 4266 for (unsigned Part = 0; Part < UF; ++Part) { 4267 SmallVector<Value *, 2> Ops; 4268 for (VPValue *VPOp : User.operands()) 4269 Ops.push_back(State.get(VPOp, Part)); 4270 4271 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4272 4273 if (auto *VecOp = dyn_cast<Instruction>(V)) 4274 VecOp->copyIRFlags(&I); 4275 4276 // Use this vector value for all users of the original instruction. 4277 VectorLoopValueMap.setVectorValue(&I, Part, V); 4278 addMetadata(V, &I); 4279 } 4280 4281 break; 4282 } 4283 case Instruction::ICmp: 4284 case Instruction::FCmp: { 4285 // Widen compares. Generate vector compares. 4286 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4287 auto *Cmp = cast<CmpInst>(&I); 4288 setDebugLocFromInst(Builder, Cmp); 4289 for (unsigned Part = 0; Part < UF; ++Part) { 4290 Value *A = State.get(User.getOperand(0), Part); 4291 Value *B = State.get(User.getOperand(1), Part); 4292 Value *C = nullptr; 4293 if (FCmp) { 4294 // Propagate fast math flags. 4295 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4296 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4297 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4298 } else { 4299 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4300 } 4301 VectorLoopValueMap.setVectorValue(&I, Part, C); 4302 addMetadata(C, &I); 4303 } 4304 4305 break; 4306 } 4307 4308 case Instruction::ZExt: 4309 case Instruction::SExt: 4310 case Instruction::FPToUI: 4311 case Instruction::FPToSI: 4312 case Instruction::FPExt: 4313 case Instruction::PtrToInt: 4314 case Instruction::IntToPtr: 4315 case Instruction::SIToFP: 4316 case Instruction::UIToFP: 4317 case Instruction::Trunc: 4318 case Instruction::FPTrunc: 4319 case Instruction::BitCast: { 4320 auto *CI = cast<CastInst>(&I); 4321 setDebugLocFromInst(Builder, CI); 4322 4323 /// Vectorize casts. 4324 Type *DestTy = 4325 (VF == 1) ? 
CI->getType() : VectorType::get(CI->getType(), VF); 4326 4327 for (unsigned Part = 0; Part < UF; ++Part) { 4328 Value *A = State.get(User.getOperand(0), Part); 4329 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4330 VectorLoopValueMap.setVectorValue(&I, Part, Cast); 4331 addMetadata(Cast, &I); 4332 } 4333 break; 4334 } 4335 default: 4336 // This instruction is not vectorized by simple widening. 4337 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4338 llvm_unreachable("Unhandled instruction!"); 4339 } // end of switch. 4340 } 4341 4342 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands, 4343 VPTransformState &State) { 4344 assert(!isa<DbgInfoIntrinsic>(I) && 4345 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4346 setDebugLocFromInst(Builder, &I); 4347 4348 Module *M = I.getParent()->getParent()->getParent(); 4349 auto *CI = cast<CallInst>(&I); 4350 4351 SmallVector<Type *, 4> Tys; 4352 for (Value *ArgOperand : CI->arg_operands()) 4353 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 4354 4355 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4356 4357 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4358 // version of the instruction. 4359 // Is it beneficial to perform intrinsic call compared to lib call? 4360 bool NeedToScalarize = false; 4361 unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4362 bool UseVectorIntrinsic = 4363 ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost; 4364 assert((UseVectorIntrinsic || !NeedToScalarize) && 4365 "Instruction should be scalarized elsewhere."); 4366 4367 for (unsigned Part = 0; Part < UF; ++Part) { 4368 SmallVector<Value *, 4> Args; 4369 for (auto &I : enumerate(ArgOperands.operands())) { 4370 // Some intrinsics have a scalar argument - don't replace it with a 4371 // vector. 4372 Value *Arg; 4373 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 4374 Arg = State.get(I.value(), Part); 4375 else 4376 Arg = State.get(I.value(), {0, 0}); 4377 Args.push_back(Arg); 4378 } 4379 4380 Function *VectorF; 4381 if (UseVectorIntrinsic) { 4382 // Use vector version of the intrinsic. 4383 Type *TysForDecl[] = {CI->getType()}; 4384 if (VF > 1) 4385 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4386 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4387 } else { 4388 // Use vector version of the function call. 
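      // For illustration (hypothetical mapping, not from a real TLI entry):
      // with VF == 4, a scalar call to float @expf(float) would be replaced by
      // a call to a vector variant taking and returning <4 x float>, provided
      // VFDatabase holds a mapping whose VFShape matches VF == 4 with no mask.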
4389 const VFShape Shape = 4390 VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/); 4391 #ifndef NDEBUG 4392 const SmallVector<VFInfo, 8> Infos = VFDatabase::getMappings(*CI); 4393 assert(std::find_if(Infos.begin(), Infos.end(), 4394 [&Shape](const VFInfo &Info) { 4395 return Info.Shape == Shape; 4396 }) != Infos.end() && 4397 "Vector function shape is missing from the database."); 4398 #endif 4399 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 4400 } 4401 assert(VectorF && "Can't create vector function."); 4402 4403 SmallVector<OperandBundleDef, 1> OpBundles; 4404 CI->getOperandBundlesAsDefs(OpBundles); 4405 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4406 4407 if (isa<FPMathOperator>(V)) 4408 V->copyFastMathFlags(CI); 4409 4410 VectorLoopValueMap.setVectorValue(&I, Part, V); 4411 addMetadata(V, &I); 4412 } 4413 } 4414 4415 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, 4416 bool InvariantCond) { 4417 setDebugLocFromInst(Builder, &I); 4418 4419 // The condition can be loop invariant but still defined inside the 4420 // loop. This means that we can't just use the original 'cond' value. 4421 // We have to take the 'vectorized' value and pick the first lane. 4422 // Instcombine will make this a no-op. 4423 4424 auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0}); 4425 4426 for (unsigned Part = 0; Part < UF; ++Part) { 4427 Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part); 4428 Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part); 4429 Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part); 4430 Value *Sel = 4431 Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1); 4432 VectorLoopValueMap.setVectorValue(&I, Part, Sel); 4433 addMetadata(Sel, &I); 4434 } 4435 } 4436 4437 void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) { 4438 // We should not collect Scalars more than once per VF. Right now, this 4439 // function is called from collectUniformsAndScalars(), which already does 4440 // this check. Collecting Scalars for VF=1 does not make any sense. 4441 assert(VF >= 2 && Scalars.find(VF) == Scalars.end() && 4442 "This function should not be visited twice for the same VF"); 4443 4444 SmallSetVector<Instruction *, 8> Worklist; 4445 4446 // These sets are used to seed the analysis with pointers used by memory 4447 // accesses that will remain scalar. 4448 SmallSetVector<Instruction *, 8> ScalarPtrs; 4449 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4450 4451 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4452 // The pointer operands of loads and stores will be scalar as long as the 4453 // memory access is not a gather or scatter operation. The value operand of a 4454 // store will remain scalar if the store is scalarized. 4455 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 4456 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 4457 assert(WideningDecision != CM_Unknown && 4458 "Widening decision should be ready at this moment"); 4459 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 4460 if (Ptr == Store->getValueOperand()) 4461 return WideningDecision == CM_Scalarize; 4462 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 4463 "Ptr is neither a value or pointer operand"); 4464 return WideningDecision != CM_GatherScatter; 4465 }; 4466 4467 // A helper that returns true if the given value is a bitcast or 4468 // getelementptr instruction contained in the loop. 
4469 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 4470 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 4471 isa<GetElementPtrInst>(V)) && 4472 !TheLoop->isLoopInvariant(V); 4473 }; 4474 4475 // A helper that evaluates a memory access's use of a pointer. If the use 4476 // will be a scalar use, and the pointer is only used by memory accesses, we 4477 // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in 4478 // PossibleNonScalarPtrs. 4479 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 4480 // We only care about bitcast and getelementptr instructions contained in 4481 // the loop. 4482 if (!isLoopVaryingBitCastOrGEP(Ptr)) 4483 return; 4484 4485 // If the pointer has already been identified as scalar (e.g., if it was 4486 // also identified as uniform), there's nothing to do. 4487 auto *I = cast<Instruction>(Ptr); 4488 if (Worklist.count(I)) 4489 return; 4490 4491 // If the use of the pointer will be a scalar use, and all users of the 4492 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, 4493 // place the pointer in PossibleNonScalarPtrs. 4494 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) { 4495 return isa<LoadInst>(U) || isa<StoreInst>(U); 4496 })) 4497 ScalarPtrs.insert(I); 4498 else 4499 PossibleNonScalarPtrs.insert(I); 4500 }; 4501 4502 // We seed the scalars analysis with three classes of instructions: (1) 4503 // instructions marked uniform-after-vectorization, (2) bitcast and 4504 // getelementptr instructions used by memory accesses requiring a scalar use, 4505 // and (3) pointer induction variables and their update instructions (we 4506 // currently only scalarize these). 4507 // 4508 // (1) Add to the worklist all instructions that have been identified as 4509 // uniform-after-vectorization. 4510 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4511 4512 // (2) Add to the worklist all bitcast and getelementptr instructions used by 4513 // memory accesses requiring a scalar use. The pointer operands of loads and 4514 // stores will be scalar as long as the memory accesses is not a gather or 4515 // scatter operation. The value operand of a store will remain scalar if the 4516 // store is scalarized. 4517 for (auto *BB : TheLoop->blocks()) 4518 for (auto &I : *BB) { 4519 if (auto *Load = dyn_cast<LoadInst>(&I)) { 4520 evaluatePtrUse(Load, Load->getPointerOperand()); 4521 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 4522 evaluatePtrUse(Store, Store->getPointerOperand()); 4523 evaluatePtrUse(Store, Store->getValueOperand()); 4524 } 4525 } 4526 for (auto *I : ScalarPtrs) 4527 if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) { 4528 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 4529 Worklist.insert(I); 4530 } 4531 4532 // (3) Add to the worklist all pointer induction variables and their update 4533 // instructions. 4534 // 4535 // TODO: Once we are able to vectorize pointer induction variables we should 4536 // no longer insert them into the worklist here. 
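  // A minimal sketch of case (3), assuming a typical pointer induction
  // variable (illustrative IR only):
  //   %p = phi i32* [ %base, %preheader ], [ %p.next, %latch ]
  //   %p.next = getelementptr i32, i32* %p, i64 1
  // Both %p and %p.next are added to the worklist here, because pointer
  // inductions are currently always scalarized.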
4537 auto *Latch = TheLoop->getLoopLatch(); 4538 for (auto &Induction : Legal->getInductionVars()) { 4539 auto *Ind = Induction.first; 4540 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4541 if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction) 4542 continue; 4543 Worklist.insert(Ind); 4544 Worklist.insert(IndUpdate); 4545 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4546 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4547 << "\n"); 4548 } 4549 4550 // Insert the forced scalars. 4551 // FIXME: Currently widenPHIInstruction() often creates a dead vector 4552 // induction variable when the PHI user is scalarized. 4553 auto ForcedScalar = ForcedScalars.find(VF); 4554 if (ForcedScalar != ForcedScalars.end()) 4555 for (auto *I : ForcedScalar->second) 4556 Worklist.insert(I); 4557 4558 // Expand the worklist by looking through any bitcasts and getelementptr 4559 // instructions we've already identified as scalar. This is similar to the 4560 // expansion step in collectLoopUniforms(); however, here we're only 4561 // expanding to include additional bitcasts and getelementptr instructions. 4562 unsigned Idx = 0; 4563 while (Idx != Worklist.size()) { 4564 Instruction *Dst = Worklist[Idx++]; 4565 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4566 continue; 4567 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4568 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4569 auto *J = cast<Instruction>(U); 4570 return !TheLoop->contains(J) || Worklist.count(J) || 4571 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4572 isScalarUse(J, Src)); 4573 })) { 4574 Worklist.insert(Src); 4575 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4576 } 4577 } 4578 4579 // An induction variable will remain scalar if all users of the induction 4580 // variable and induction variable update remain scalar. 4581 for (auto &Induction : Legal->getInductionVars()) { 4582 auto *Ind = Induction.first; 4583 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4584 4585 // We already considered pointer induction variables, so there's no reason 4586 // to look at their users again. 4587 // 4588 // TODO: Once we are able to vectorize pointer induction variables we 4589 // should no longer skip over them here. 4590 if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction) 4591 continue; 4592 4593 // Determine if all users of the induction variable are scalar after 4594 // vectorization. 4595 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4596 auto *I = cast<Instruction>(U); 4597 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 4598 }); 4599 if (!ScalarInd) 4600 continue; 4601 4602 // Determine if all users of the induction variable update instruction are 4603 // scalar after vectorization. 4604 auto ScalarIndUpdate = 4605 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4606 auto *I = cast<Instruction>(U); 4607 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 4608 }); 4609 if (!ScalarIndUpdate) 4610 continue; 4611 4612 // The induction variable and its update instruction will remain scalar. 
4613 Worklist.insert(Ind); 4614 Worklist.insert(IndUpdate); 4615 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4616 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4617 << "\n"); 4618 } 4619 4620 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4621 } 4622 4623 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I, unsigned VF) { 4624 if (!blockNeedsPredication(I->getParent())) 4625 return false; 4626 switch(I->getOpcode()) { 4627 default: 4628 break; 4629 case Instruction::Load: 4630 case Instruction::Store: { 4631 if (!Legal->isMaskRequired(I)) 4632 return false; 4633 auto *Ptr = getLoadStorePointerOperand(I); 4634 auto *Ty = getMemInstValueType(I); 4635 // We have already decided how to vectorize this instruction, get that 4636 // result. 4637 if (VF > 1) { 4638 InstWidening WideningDecision = getWideningDecision(I, VF); 4639 assert(WideningDecision != CM_Unknown && 4640 "Widening decision should be ready at this moment"); 4641 return WideningDecision == CM_Scalarize; 4642 } 4643 const MaybeAlign Alignment = getLoadStoreAlignment(I); 4644 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4645 isLegalMaskedGather(Ty, Alignment)) 4646 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4647 isLegalMaskedScatter(Ty, Alignment)); 4648 } 4649 case Instruction::UDiv: 4650 case Instruction::SDiv: 4651 case Instruction::SRem: 4652 case Instruction::URem: 4653 return mayDivideByZero(*I); 4654 } 4655 return false; 4656 } 4657 4658 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I, 4659 unsigned VF) { 4660 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4661 assert(getWideningDecision(I, VF) == CM_Unknown && 4662 "Decision should not be set yet."); 4663 auto *Group = getInterleavedAccessGroup(I); 4664 assert(Group && "Must have a group."); 4665 4666 // If the instruction's allocated size doesn't equal it's type size, it 4667 // requires padding and will be scalarized. 4668 auto &DL = I->getModule()->getDataLayout(); 4669 auto *ScalarTy = getMemInstValueType(I); 4670 if (hasIrregularType(ScalarTy, DL, VF)) 4671 return false; 4672 4673 // Check if masking is required. 4674 // A Group may need masking for one of two reasons: it resides in a block that 4675 // needs predication, or it was decided to use masking to deal with gaps. 4676 bool PredicatedAccessRequiresMasking = 4677 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 4678 bool AccessWithGapsRequiresMasking = 4679 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 4680 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 4681 return true; 4682 4683 // If masked interleaving is required, we expect that the user/target had 4684 // enabled it, because otherwise it either wouldn't have been created or 4685 // it should have been invalidated by the CostModel. 4686 assert(useMaskedInterleavedAccesses(TTI) && 4687 "Masked interleave-groups for predicated accesses are not enabled."); 4688 4689 auto *Ty = getMemInstValueType(I); 4690 const MaybeAlign Alignment = getLoadStoreAlignment(I); 4691 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 4692 : TTI.isLegalMaskedStore(Ty, Alignment); 4693 } 4694 4695 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I, 4696 unsigned VF) { 4697 // Get and ensure we have a valid memory instruction. 
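  // For example (illustrative): a load of A[i] whose address advances by one
  // element per iteration is consecutive and may be widened below, whereas a
  // store that needs predication, or an access whose allocated size differs
  // from its type size, is rejected here and will be scalarized instead.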
4698 LoadInst *LI = dyn_cast<LoadInst>(I); 4699 StoreInst *SI = dyn_cast<StoreInst>(I); 4700 assert((LI || SI) && "Invalid memory instruction"); 4701 4702 auto *Ptr = getLoadStorePointerOperand(I); 4703 4704 // In order to be widened, the pointer should be consecutive, first of all. 4705 if (!Legal->isConsecutivePtr(Ptr)) 4706 return false; 4707 4708 // If the instruction is a store located in a predicated block, it will be 4709 // scalarized. 4710 if (isScalarWithPredication(I)) 4711 return false; 4712 4713 // If the instruction's allocated size doesn't equal it's type size, it 4714 // requires padding and will be scalarized. 4715 auto &DL = I->getModule()->getDataLayout(); 4716 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 4717 if (hasIrregularType(ScalarTy, DL, VF)) 4718 return false; 4719 4720 return true; 4721 } 4722 4723 void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) { 4724 // We should not collect Uniforms more than once per VF. Right now, 4725 // this function is called from collectUniformsAndScalars(), which 4726 // already does this check. Collecting Uniforms for VF=1 does not make any 4727 // sense. 4728 4729 assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() && 4730 "This function should not be visited twice for the same VF"); 4731 4732 // Visit the list of Uniforms. If we'll not find any uniform value, we'll 4733 // not analyze again. Uniforms.count(VF) will return 1. 4734 Uniforms[VF].clear(); 4735 4736 // We now know that the loop is vectorizable! 4737 // Collect instructions inside the loop that will remain uniform after 4738 // vectorization. 4739 4740 // Global values, params and instructions outside of current loop are out of 4741 // scope. 4742 auto isOutOfScope = [&](Value *V) -> bool { 4743 Instruction *I = dyn_cast<Instruction>(V); 4744 return (!I || !TheLoop->contains(I)); 4745 }; 4746 4747 SetVector<Instruction *> Worklist; 4748 BasicBlock *Latch = TheLoop->getLoopLatch(); 4749 4750 // Instructions that are scalar with predication must not be considered 4751 // uniform after vectorization, because that would create an erroneous 4752 // replicating region where only a single instance out of VF should be formed. 4753 // TODO: optimize such seldom cases if found important, see PR40816. 4754 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4755 if (isScalarWithPredication(I, VF)) { 4756 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4757 << *I << "\n"); 4758 return; 4759 } 4760 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4761 Worklist.insert(I); 4762 }; 4763 4764 // Start with the conditional branch. If the branch condition is an 4765 // instruction contained in the loop that is only used by the branch, it is 4766 // uniform. 4767 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4768 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4769 addToWorklistIfAllowed(Cmp); 4770 4771 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 4772 // are pointers that are treated like consecutive pointers during 4773 // vectorization. The pointer operands of interleaved accesses are an 4774 // example. 4775 SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs; 4776 4777 // Holds pointer operands of instructions that are possibly non-uniform. 
4778 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 4779 4780 auto isUniformDecision = [&](Instruction *I, unsigned VF) { 4781 InstWidening WideningDecision = getWideningDecision(I, VF); 4782 assert(WideningDecision != CM_Unknown && 4783 "Widening decision should be ready at this moment"); 4784 4785 return (WideningDecision == CM_Widen || 4786 WideningDecision == CM_Widen_Reverse || 4787 WideningDecision == CM_Interleave); 4788 }; 4789 // Iterate over the instructions in the loop, and collect all 4790 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 4791 // that a consecutive-like pointer operand will be scalarized, we collect it 4792 // in PossibleNonUniformPtrs instead. We use two sets here because a single 4793 // getelementptr instruction can be used by both vectorized and scalarized 4794 // memory instructions. For example, if a loop loads and stores from the same 4795 // location, but the store is conditional, the store will be scalarized, and 4796 // the getelementptr won't remain uniform. 4797 for (auto *BB : TheLoop->blocks()) 4798 for (auto &I : *BB) { 4799 // If there's no pointer operand, there's nothing to do. 4800 auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 4801 if (!Ptr) 4802 continue; 4803 4804 // True if all users of Ptr are memory accesses that have Ptr as their 4805 // pointer operand. 4806 auto UsersAreMemAccesses = 4807 llvm::all_of(Ptr->users(), [&](User *U) -> bool { 4808 return getLoadStorePointerOperand(U) == Ptr; 4809 }); 4810 4811 // Ensure the memory instruction will not be scalarized or used by 4812 // gather/scatter, making its pointer operand non-uniform. If the pointer 4813 // operand is used by any instruction other than a memory access, we 4814 // conservatively assume the pointer operand may be non-uniform. 4815 if (!UsersAreMemAccesses || !isUniformDecision(&I, VF)) 4816 PossibleNonUniformPtrs.insert(Ptr); 4817 4818 // If the memory instruction will be vectorized and its pointer operand 4819 // is consecutive-like or used by an interleaved access, the pointer 4820 // operand should remain uniform. 4821 else 4822 ConsecutiveLikePtrs.insert(Ptr); 4823 } 4824 4825 // Add to the Worklist all consecutive and consecutive-like pointers that 4826 // aren't also identified as possibly non-uniform. 4827 for (auto *V : ConsecutiveLikePtrs) 4828 if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end()) 4829 addToWorklistIfAllowed(V); 4830 4831 // Expand Worklist in topological order: whenever a new instruction 4832 // is added, its users should already be in the Worklist. This ensures 4833 // a uniform instruction will only be used by uniform instructions. 4834 unsigned idx = 0; 4835 while (idx != Worklist.size()) { 4836 Instruction *I = Worklist[idx++]; 4837 4838 for (auto OV : I->operand_values()) { 4839 // isOutOfScope operands cannot be uniform instructions. 4840 if (isOutOfScope(OV)) 4841 continue; 4842 // First-order recurrence phis should typically be considered 4843 // non-uniform. 4844 auto *OP = dyn_cast<PHINode>(OV); 4845 if (OP && Legal->isFirstOrderRecurrence(OP)) 4846 continue; 4847 // If all the users of the operand are uniform, then add the 4848 // operand into the uniform worklist.
4849 auto *OI = cast<Instruction>(OV); 4850 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4851 auto *J = cast<Instruction>(U); 4852 return Worklist.count(J) || 4853 (OI == getLoadStorePointerOperand(J) && 4854 isUniformDecision(J, VF)); 4855 })) 4856 addToWorklistIfAllowed(OI); 4857 } 4858 } 4859 4860 // Returns true if Ptr is the pointer operand of a memory access instruction 4861 // I, and I is known to not require scalarization. 4862 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4863 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4864 }; 4865 4866 // For an instruction to be added into Worklist above, all its users inside 4867 // the loop should also be in Worklist. However, this condition cannot be 4868 // true for phi nodes that form a cyclic dependence. We must process phi 4869 // nodes separately. An induction variable will remain uniform if all users 4870 // of the induction variable and induction variable update remain uniform. 4871 // The code below handles both pointer and non-pointer induction variables. 4872 for (auto &Induction : Legal->getInductionVars()) { 4873 auto *Ind = Induction.first; 4874 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4875 4876 // Determine if all users of the induction variable are uniform after 4877 // vectorization. 4878 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4879 auto *I = cast<Instruction>(U); 4880 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4881 isVectorizedMemAccessUse(I, Ind); 4882 }); 4883 if (!UniformInd) 4884 continue; 4885 4886 // Determine if all users of the induction variable update instruction are 4887 // uniform after vectorization. 4888 auto UniformIndUpdate = 4889 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4890 auto *I = cast<Instruction>(U); 4891 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4892 isVectorizedMemAccessUse(I, IndUpdate); 4893 }); 4894 if (!UniformIndUpdate) 4895 continue; 4896 4897 // The induction variable and its update instruction will remain uniform. 4898 addToWorklistIfAllowed(Ind); 4899 addToWorklistIfAllowed(IndUpdate); 4900 } 4901 4902 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4903 } 4904 4905 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4906 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4907 4908 if (Legal->getRuntimePointerChecking()->Need) { 4909 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4910 "runtime pointer checks needed. Enable vectorization of this " 4911 "loop with '#pragma clang loop vectorize(enable)' when " 4912 "compiling with -Os/-Oz", 4913 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4914 return true; 4915 } 4916 4917 if (!PSE.getUnionPredicate().getPredicates().empty()) { 4918 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4919 "runtime SCEV checks needed. Enable vectorization of this " 4920 "loop with '#pragma clang loop vectorize(enable)' when " 4921 "compiling with -Os/-Oz", 4922 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4923 return true; 4924 } 4925 4926 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4927 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4928 reportVectorizationFailure("Runtime stride check is required with -Os/-Oz", 4929 "runtime stride == 1 checks needed. 
Enable vectorization of " 4930 "this loop with '#pragma clang loop vectorize(enable)' when " 4931 "compiling with -Os/-Oz", 4932 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4933 return true; 4934 } 4935 4936 return false; 4937 } 4938 4939 Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() { 4940 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4941 // TODO: It may be useful to do this, since the check is still likely to be 4942 // dynamically uniform if the target can skip it. 4943 reportVectorizationFailure( 4944 "Not inserting runtime ptr check for divergent target", 4945 "runtime pointer checks needed. Not enabled for divergent target", 4946 "CantVersionLoopWithDivergentTarget", ORE, TheLoop); 4947 return None; 4948 } 4949 4950 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 4951 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 4952 if (TC == 1) { 4953 reportVectorizationFailure("Single iteration (non) loop", 4954 "loop trip count is one, irrelevant for vectorization", 4955 "SingleIterationLoop", ORE, TheLoop); 4956 return None; 4957 } 4958 4959 switch (ScalarEpilogueStatus) { 4960 case CM_ScalarEpilogueAllowed: 4961 return computeFeasibleMaxVF(TC); 4962 case CM_ScalarEpilogueNotNeededUsePredicate: 4963 LLVM_DEBUG( 4964 dbgs() << "LV: vector predicate hint/switch found.\n" 4965 << "LV: Not allowing scalar epilogue, creating predicated " 4966 << "vector loop.\n"); 4967 break; 4968 case CM_ScalarEpilogueNotAllowedLowTripLoop: 4969 // fallthrough as a special case of OptForSize 4970 case CM_ScalarEpilogueNotAllowedOptSize: 4971 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize) 4972 LLVM_DEBUG( 4973 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n"); 4974 else 4975 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip " 4976 << "count.\n"); 4977 4978 // Bail if runtime checks are required, which are not good when optimising 4979 // for size. 4980 if (runtimeChecksRequired()) 4981 return None; 4982 break; 4983 } 4984 4985 // Now try tail folding. 4986 4987 // Invalidate interleave groups that require an epilogue if we can't mask 4988 // the interleave-group. 4989 if (!useMaskedInterleavedAccesses(TTI)) { 4990 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() && 4991 "No decisions should have been taken at this point"); 4992 // Note: There is no need to invalidate any cost modeling decisions here, as 4993 // none were taken so far. 4994 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue(); 4995 } 4996 4997 unsigned MaxVF = computeFeasibleMaxVF(TC); 4998 if (TC > 0 && TC % MaxVF == 0) { 4999 // Accept MaxVF if we do not have a tail. 5000 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5001 return MaxVF; 5002 } 5003 5004 // If we don't know the precise trip count, or if the trip count that we 5005 // found modulo the vectorization factor is not zero, try to fold the tail 5006 // by masking. 5007 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
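  // Worked example (hypothetical numbers): with TC == 17 and MaxVF == 8,
  // 17 % 8 != 0, so two full vector iterations would leave a tail of one
  // scalar iteration; if the tail can be folded by masking, the masked vector
  // loop handles that remainder instead of a scalar epilogue.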
5008 if (Legal->prepareToFoldTailByMasking()) { 5009 FoldTailByMasking = true; 5010 return MaxVF; 5011 } 5012 5013 if (TC == 0) { 5014 reportVectorizationFailure( 5015 "Unable to calculate the loop count due to complex control flow", 5016 "unable to calculate the loop count due to complex control flow", 5017 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5018 return None; 5019 } 5020 5021 reportVectorizationFailure( 5022 "Cannot optimize for size and vectorize at the same time.", 5023 "cannot optimize for size and vectorize at the same time. " 5024 "Enable vectorization of this loop with '#pragma clang loop " 5025 "vectorize(enable)' when compiling with -Os/-Oz", 5026 "NoTailLoopWithOptForSize", ORE, TheLoop); 5027 return None; 5028 } 5029 5030 unsigned 5031 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) { 5032 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5033 unsigned SmallestType, WidestType; 5034 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5035 unsigned WidestRegister = TTI.getRegisterBitWidth(true); 5036 5037 // Get the maximum safe dependence distance in bits computed by LAA. 5038 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5039 // the memory accesses that is most restrictive (involved in the smallest 5040 // dependence distance). 5041 unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth(); 5042 5043 WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth); 5044 5045 unsigned MaxVectorSize = WidestRegister / WidestType; 5046 5047 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5048 << " / " << WidestType << " bits.\n"); 5049 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " 5050 << WidestRegister << " bits.\n"); 5051 5052 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" 5053 " into one vector!"); 5054 if (MaxVectorSize == 0) { 5055 LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 5056 MaxVectorSize = 1; 5057 return MaxVectorSize; 5058 } else if (ConstTripCount && ConstTripCount < MaxVectorSize && 5059 isPowerOf2_32(ConstTripCount)) { 5060 // We need to clamp the VF to be the ConstTripCount. There is no point in 5061 // choosing a higher viable VF as done in the loop below. 5062 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: " 5063 << ConstTripCount << "\n"); 5064 MaxVectorSize = ConstTripCount; 5065 return MaxVectorSize; 5066 } 5067 5068 unsigned MaxVF = MaxVectorSize; 5069 if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) || 5070 (MaximizeBandwidth && isScalarEpilogueAllowed())) { 5071 // Collect all viable vectorization factors larger than the default MaxVF 5072 // (i.e. MaxVectorSize). 5073 SmallVector<unsigned, 8> VFs; 5074 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 5075 for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2) 5076 VFs.push_back(VS); 5077 5078 // For each VF calculate its register usage. 5079 auto RUs = calculateRegisterUsage(VFs); 5080 5081 // Select the largest VF which doesn't require more registers than existing 5082 // ones. 
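    // Illustrative numbers only: with a 128-bit widest register, a widest
    // type of 32 bits and a smallest type of 8 bits, MaxVectorSize is 4 and
    // the candidate VFs collected above are 8 and 16; the largest candidate
    // whose per-class register usage still fits the target is picked below.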
5083 for (int i = RUs.size() - 1; i >= 0; --i) { 5084 bool Selected = true; 5085 for (auto& pair : RUs[i].MaxLocalUsers) { 5086 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5087 if (pair.second > TargetNumRegisters) 5088 Selected = false; 5089 } 5090 if (Selected) { 5091 MaxVF = VFs[i]; 5092 break; 5093 } 5094 } 5095 if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) { 5096 if (MaxVF < MinVF) { 5097 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5098 << ") with target's minimum: " << MinVF << '\n'); 5099 MaxVF = MinVF; 5100 } 5101 } 5102 } 5103 return MaxVF; 5104 } 5105 5106 VectorizationFactor 5107 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) { 5108 float Cost = expectedCost(1).first; 5109 const float ScalarCost = Cost; 5110 unsigned Width = 1; 5111 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 5112 5113 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5114 if (ForceVectorization && MaxVF > 1) { 5115 // Ignore scalar width, because the user explicitly wants vectorization. 5116 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5117 // evaluation. 5118 Cost = std::numeric_limits<float>::max(); 5119 } 5120 5121 for (unsigned i = 2; i <= MaxVF; i *= 2) { 5122 // Notice that the vector loop needs to be executed less times, so 5123 // we need to divide the cost of the vector loops by the width of 5124 // the vector elements. 5125 VectorizationCostTy C = expectedCost(i); 5126 float VectorCost = C.first / (float)i; 5127 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i 5128 << " costs: " << (int)VectorCost << ".\n"); 5129 if (!C.second && !ForceVectorization) { 5130 LLVM_DEBUG( 5131 dbgs() << "LV: Not considering vector loop of width " << i 5132 << " because it will not generate any vector instructions.\n"); 5133 continue; 5134 } 5135 if (VectorCost < Cost) { 5136 Cost = VectorCost; 5137 Width = i; 5138 } 5139 } 5140 5141 if (!EnableCondStoresVectorization && NumPredStores) { 5142 reportVectorizationFailure("There are conditional stores.", 5143 "store that is conditionally executed prevents vectorization", 5144 "ConditionalStore", ORE, TheLoop); 5145 Width = 1; 5146 Cost = ScalarCost; 5147 } 5148 5149 LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs() 5150 << "LV: Vectorization seems to be not beneficial, " 5151 << "but was forced by a user.\n"); 5152 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n"); 5153 VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)}; 5154 return Factor; 5155 } 5156 5157 std::pair<unsigned, unsigned> 5158 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5159 unsigned MinWidth = -1U; 5160 unsigned MaxWidth = 8; 5161 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5162 5163 // For each block. 5164 for (BasicBlock *BB : TheLoop->blocks()) { 5165 // For each instruction in the loop. 5166 for (Instruction &I : BB->instructionsWithoutDebug()) { 5167 Type *T = I.getType(); 5168 5169 // Skip ignored values. 5170 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end()) 5171 continue; 5172 5173 // Only examine Loads, Stores and PHINodes. 5174 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5175 continue; 5176 5177 // Examine PHI nodes that are reduction variables. Update the type to 5178 // account for the recurrence type. 
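      // For instance (illustrative, assuming the recurrence descriptor
      // recorded a narrower type): a reduction PHI declared as i32 whose
      // recurrence has been narrowed to i8 contributes 8 bits, not 32, to the
      // width computation below.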
5179 if (auto *PN = dyn_cast<PHINode>(&I)) { 5180 if (!Legal->isReductionVariable(PN)) 5181 continue; 5182 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN]; 5183 T = RdxDesc.getRecurrenceType(); 5184 } 5185 5186 // Examine the stored values. 5187 if (auto *ST = dyn_cast<StoreInst>(&I)) 5188 T = ST->getValueOperand()->getType(); 5189 5190 // Ignore loaded pointer types and stored pointer types that are not 5191 // vectorizable. 5192 // 5193 // FIXME: The check here attempts to predict whether a load or store will 5194 // be vectorized. We only know this for certain after a VF has 5195 // been selected. Here, we assume that if an access can be 5196 // vectorized, it will be. We should also look at extending this 5197 // optimization to non-pointer types. 5198 // 5199 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) && 5200 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I)) 5201 continue; 5202 5203 MinWidth = std::min(MinWidth, 5204 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5205 MaxWidth = std::max(MaxWidth, 5206 (unsigned)DL.getTypeSizeInBits(T->getScalarType())); 5207 } 5208 } 5209 5210 return {MinWidth, MaxWidth}; 5211 } 5212 5213 unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF, 5214 unsigned LoopCost) { 5215 // -- The interleave heuristics -- 5216 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5217 // There are many micro-architectural considerations that we can't predict 5218 // at this level. For example, frontend pressure (on decode or fetch) due to 5219 // code size, or the number and capabilities of the execution ports. 5220 // 5221 // We use the following heuristics to select the interleave count: 5222 // 1. If the code has reductions, then we interleave to break the cross 5223 // iteration dependency. 5224 // 2. If the loop is really small, then we interleave to reduce the loop 5225 // overhead. 5226 // 3. We don't interleave if we think that we will spill registers to memory 5227 // due to the increased register pressure. 5228 5229 if (!isScalarEpilogueAllowed()) 5230 return 1; 5231 5232 // We used the distance for the interleave count. 5233 if (Legal->getMaxSafeDepDistBytes() != -1U) 5234 return 1; 5235 5236 // Do not interleave loops with a relatively small known or estimated trip 5237 // count. 5238 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5239 if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold) 5240 return 1; 5241 5242 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5243 // We divide by these constants so assume that we have at least one 5244 // instruction that uses at least one register. 5245 for (auto& pair : R.MaxLocalUsers) { 5246 pair.second = std::max(pair.second, 1U); 5247 } 5248 5249 // We calculate the interleave count using the following formula. 5250 // Subtract the number of loop invariants from the number of available 5251 // registers. These registers are used by all of the interleaved instances. 5252 // Next, divide the remaining registers by the number of registers that is 5253 // required by the loop, in order to estimate how many parallel instances 5254 // fit without causing spills. All of this is rounded down if necessary to be 5255 // a power of two. We want power of two interleave count to simplify any 5256 // addressing operations or alignment considerations. 
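  // For example (hypothetical register counts): with 16 registers available
  // in a class, 2 of them taken by loop-invariant values, and a maximum of 3
  // local users live at once, the basic estimate below (ignoring the
  // induction variable heuristic) gives PowerOf2Floor((16 - 2) / 3) = 4
  // interleaved instances.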
5257 // We also want power of two interleave counts to ensure that the induction 5258 // variable of the vector loop wraps to zero, when tail is folded by masking; 5259 // this currently happens when OptForSize, in which case IC is set to 1 above. 5260 unsigned IC = UINT_MAX; 5261 5262 for (auto& pair : R.MaxLocalUsers) { 5263 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5264 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5265 << " registers of " 5266 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5267 if (VF == 1) { 5268 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5269 TargetNumRegisters = ForceTargetNumScalarRegs; 5270 } else { 5271 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5272 TargetNumRegisters = ForceTargetNumVectorRegs; 5273 } 5274 unsigned MaxLocalUsers = pair.second; 5275 unsigned LoopInvariantRegs = 0; 5276 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5277 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5278 5279 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5280 // Don't count the induction variable as interleaved. 5281 if (EnableIndVarRegisterHeur) { 5282 TmpIC = 5283 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5284 std::max(1U, (MaxLocalUsers - 1))); 5285 } 5286 5287 IC = std::min(IC, TmpIC); 5288 } 5289 5290 // Clamp the interleave ranges to reasonable counts. 5291 unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF); 5292 5293 // Check if the user has overridden the max. 5294 if (VF == 1) { 5295 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5296 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5297 } else { 5298 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5299 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5300 } 5301 5302 // If trip count is known or estimated compile time constant, limit the 5303 // interleave count to be less than the trip count divided by VF. 5304 if (BestKnownTC) { 5305 MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount); 5306 } 5307 5308 // If we did not calculate the cost for VF (because the user selected the VF) 5309 // then we calculate the cost of VF here. 5310 if (LoopCost == 0) 5311 LoopCost = expectedCost(VF).first; 5312 5313 assert(LoopCost && "Non-zero loop cost expected"); 5314 5315 // Clamp the calculated IC to be between the 1 and the max interleave count 5316 // that the target and trip count allows. 5317 if (IC > MaxInterleaveCount) 5318 IC = MaxInterleaveCount; 5319 else if (IC < 1) 5320 IC = 1; 5321 5322 // Interleave if we vectorized this loop and there is a reduction that could 5323 // benefit from interleaving. 5324 if (VF > 1 && !Legal->getReductionVars().empty()) { 5325 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5326 return IC; 5327 } 5328 5329 // Note that if we've already vectorized the loop we will have done the 5330 // runtime check and so interleaving won't require further checks. 5331 bool InterleavingRequiresRuntimePointerCheck = 5332 (VF == 1 && Legal->getRuntimePointerChecking()->Need); 5333 5334 // We want to interleave small loops in order to reduce the loop overhead and 5335 // potentially expose ILP opportunities. 
5336 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'); 5337 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 5338 // We assume that the cost overhead is 1 and we use the cost model 5339 // to estimate the cost of the loop and interleave until the cost of the 5340 // loop overhead is about 5% of the cost of the loop. 5341 unsigned SmallIC = 5342 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5343 5344 // Interleave until store/load ports (estimated by max interleave count) are 5345 // saturated. 5346 unsigned NumStores = Legal->getNumStores(); 5347 unsigned NumLoads = Legal->getNumLoads(); 5348 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 5349 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 5350 5351 // If we have a scalar reduction (vector reductions are already dealt with 5352 // by this point), we can increase the critical path length if the loop 5353 // we're interleaving is inside another loop. Limit, by default to 2, so the 5354 // critical path only gets increased by one reduction operation. 5355 if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) { 5356 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC); 5357 SmallIC = std::min(SmallIC, F); 5358 StoresIC = std::min(StoresIC, F); 5359 LoadsIC = std::min(LoadsIC, F); 5360 } 5361 5362 if (EnableLoadStoreRuntimeInterleave && 5363 std::max(StoresIC, LoadsIC) > SmallIC) { 5364 LLVM_DEBUG( 5365 dbgs() << "LV: Interleaving to saturate store or load ports.\n"); 5366 return std::max(StoresIC, LoadsIC); 5367 } 5368 5369 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n"); 5370 return SmallIC; 5371 } 5372 5373 // Interleave if this is a large loop (small loops are already dealt with by 5374 // this point) that could benefit from interleaving. 5375 bool HasReductions = !Legal->getReductionVars().empty(); 5376 if (TTI.enableAggressiveInterleaving(HasReductions)) { 5377 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n"); 5378 return IC; 5379 } 5380 5381 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n"); 5382 return 1; 5383 } 5384 5385 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8> 5386 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) { 5387 // This function calculates the register usage by measuring the highest number 5388 // of values that are alive at a single location. Obviously, this is a very 5389 // rough estimation. We scan the loop in a topological order in order and 5390 // assign a number to each instruction. We use RPO to ensure that defs are 5391 // met before their users. We assume that each instruction that has in-loop 5392 // users starts an interval. We record every time that an in-loop value is 5393 // used, so we have a list of the first and last occurrences of each 5394 // instruction. Next, we transpose this data structure into a multi map that 5395 // holds the list of intervals that *end* at a specific location. This multi 5396 // map allows us to perform a linear search. We scan the instructions linearly 5397 // and record each time that a new interval starts, by placing it in a set. 5398 // If we find this value in the multi-map then we remove it from the set. 5399 // The max register usage is the maximum size of the set. 5400 // We also search for instructions that are defined outside the loop, but are 5401 // used inside the loop. 
We need this number separately from the max-interval 5402 // usage number because when we unroll, loop-invariant values do not take 5403 // more register. 5404 LoopBlocksDFS DFS(TheLoop); 5405 DFS.perform(LI); 5406 5407 RegisterUsage RU; 5408 5409 // Each 'key' in the map opens a new interval. The values 5410 // of the map are the index of the 'last seen' usage of the 5411 // instruction that is the key. 5412 using IntervalMap = DenseMap<Instruction *, unsigned>; 5413 5414 // Maps instruction to its index. 5415 SmallVector<Instruction *, 64> IdxToInstr; 5416 // Marks the end of each interval. 5417 IntervalMap EndPoint; 5418 // Saves the list of instruction indices that are used in the loop. 5419 SmallPtrSet<Instruction *, 8> Ends; 5420 // Saves the list of values that are used in the loop but are 5421 // defined outside the loop, such as arguments and constants. 5422 SmallPtrSet<Value *, 8> LoopInvariants; 5423 5424 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5425 for (Instruction &I : BB->instructionsWithoutDebug()) { 5426 IdxToInstr.push_back(&I); 5427 5428 // Save the end location of each USE. 5429 for (Value *U : I.operands()) { 5430 auto *Instr = dyn_cast<Instruction>(U); 5431 5432 // Ignore non-instruction values such as arguments, constants, etc. 5433 if (!Instr) 5434 continue; 5435 5436 // If this instruction is outside the loop then record it and continue. 5437 if (!TheLoop->contains(Instr)) { 5438 LoopInvariants.insert(Instr); 5439 continue; 5440 } 5441 5442 // Overwrite previous end points. 5443 EndPoint[Instr] = IdxToInstr.size(); 5444 Ends.insert(Instr); 5445 } 5446 } 5447 } 5448 5449 // Saves the list of intervals that end with the index in 'key'. 5450 using InstrList = SmallVector<Instruction *, 2>; 5451 DenseMap<unsigned, InstrList> TransposeEnds; 5452 5453 // Transpose the EndPoints to a list of values that end at each index. 5454 for (auto &Interval : EndPoint) 5455 TransposeEnds[Interval.second].push_back(Interval.first); 5456 5457 SmallPtrSet<Instruction *, 8> OpenIntervals; 5458 5459 // Get the size of the widest register. 5460 unsigned MaxSafeDepDist = -1U; 5461 if (Legal->getMaxSafeDepDistBytes() != -1U) 5462 MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8; 5463 unsigned WidestRegister = 5464 std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist); 5465 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5466 5467 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5468 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5469 5470 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5471 5472 // A lambda that gets the register usage for the given type and VF. 5473 auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) { 5474 if (Ty->isTokenTy()) 5475 return 0U; 5476 unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType()); 5477 return std::max<unsigned>(1, VF * TypeSize / WidestRegister); 5478 }; 5479 5480 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5481 Instruction *I = IdxToInstr[i]; 5482 5483 // Remove all of the instructions that end at this location. 5484 InstrList &List = TransposeEnds[i]; 5485 for (Instruction *ToRemove : List) 5486 OpenIntervals.erase(ToRemove); 5487 5488 // Ignore instructions that are never used within the loop. 5489 if (Ends.find(I) == Ends.end()) 5490 continue; 5491 5492 // Skip ignored values. 5493 if (ValuesToIgnore.find(I) != ValuesToIgnore.end()) 5494 continue; 5495 5496 // For each VF find the maximum usage of registers. 
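    // Illustrative only: under the GetRegUsage estimate above, a value that
    // stays vectorized, of type i32, with VF == 8 on a target whose widest
    // register is 128 bits, counts as max(1, 8 * 32 / 128) = 2 registers.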
5497 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5498 // Count the number of live intervals. 5499 SmallMapVector<unsigned, unsigned, 4> RegUsage; 5500 5501 if (VFs[j] == 1) { 5502 for (auto Inst : OpenIntervals) { 5503 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5504 if (RegUsage.find(ClassID) == RegUsage.end()) 5505 RegUsage[ClassID] = 1; 5506 else 5507 RegUsage[ClassID] += 1; 5508 } 5509 } else { 5510 collectUniformsAndScalars(VFs[j]); 5511 for (auto Inst : OpenIntervals) { 5512 // Skip ignored values for VF > 1. 5513 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end()) 5514 continue; 5515 if (isScalarAfterVectorization(Inst, VFs[j])) { 5516 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5517 if (RegUsage.find(ClassID) == RegUsage.end()) 5518 RegUsage[ClassID] = 1; 5519 else 5520 RegUsage[ClassID] += 1; 5521 } else { 5522 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 5523 if (RegUsage.find(ClassID) == RegUsage.end()) 5524 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 5525 else 5526 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 5527 } 5528 } 5529 } 5530 5531 for (auto& pair : RegUsage) { 5532 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 5533 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 5534 else 5535 MaxUsages[j][pair.first] = pair.second; 5536 } 5537 } 5538 5539 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5540 << OpenIntervals.size() << '\n'); 5541 5542 // Add the current instruction to the list of open intervals. 5543 OpenIntervals.insert(I); 5544 } 5545 5546 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5547 SmallMapVector<unsigned, unsigned, 4> Invariant; 5548 5549 for (auto Inst : LoopInvariants) { 5550 unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 5551 unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType()); 5552 if (Invariant.find(ClassID) == Invariant.end()) 5553 Invariant[ClassID] = Usage; 5554 else 5555 Invariant[ClassID] += Usage; 5556 } 5557 5558 LLVM_DEBUG({ 5559 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 5560 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 5561 << " item\n"; 5562 for (const auto &pair : MaxUsages[i]) { 5563 dbgs() << "LV(REG): RegisterClass: " 5564 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5565 << " registers\n"; 5566 } 5567 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 5568 << " item\n"; 5569 for (const auto &pair : Invariant) { 5570 dbgs() << "LV(REG): RegisterClass: " 5571 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5572 << " registers\n"; 5573 } 5574 }); 5575 5576 RU.LoopInvariantRegs = Invariant; 5577 RU.MaxLocalUsers = MaxUsages[i]; 5578 RUs[i] = RU; 5579 } 5580 5581 return RUs; 5582 } 5583 5584 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5585 // TODO: Cost model for emulated masked load/store is completely 5586 // broken. This hack guides the cost model to use an artificially 5587 // high enough value to practically disable vectorization with such 5588 // operations, except where previously deployed legality hack allowed 5589 // using very low cost values. This is to avoid regressions coming simply 5590 // from moving "masked load/store" check from legality to cost model. 5591 // Masked Load/Gather emulation was previously never allowed. 5592 // Limited number of Masked Store/Scatter emulation was allowed. 
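  // A sketch of what the check below amounts to (no additional behavior is
  // implied): every emulated masked load receives the artificially high cost,
  // while emulated masked stores only do once the number of predicated stores
  // in the loop exceeds NumberOfStoresToPredicate.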
5593 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5594 return isa<LoadInst>(I) || 5595 (isa<StoreInst>(I) && 5596 NumPredStores > NumberOfStoresToPredicate); 5597 } 5598 5599 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5600 // If we aren't vectorizing the loop, or if we've already collected the 5601 // instructions to scalarize, there's nothing to do. Collection may already 5602 // have occurred if we have a user-selected VF and are now computing the 5603 // expected cost for interleaving. 5604 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5605 return; 5606 5607 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 5608 // not profitable to scalarize any instructions, the presence of VF in the 5609 // map will indicate that we've analyzed it already. 5610 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5611 5612 // Find all the instructions that are scalar with predication in the loop and 5613 // determine if it would be better to not if-convert the blocks they are in. 5614 // If so, we also record the instructions to scalarize. 5615 for (BasicBlock *BB : TheLoop->blocks()) { 5616 if (!blockNeedsPredication(BB)) 5617 continue; 5618 for (Instruction &I : *BB) 5619 if (isScalarWithPredication(&I)) { 5620 ScalarCostsTy ScalarCosts; 5621 // Do not apply discount logic if hacked cost is needed 5622 // for emulated masked memrefs. 5623 if (!useEmulatedMaskMemRefHack(&I) && 5624 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5625 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5626 // Remember that BB will remain after vectorization. 5627 PredicatedBBsAfterVectorization.insert(BB); 5628 } 5629 } 5630 } 5631 5632 int LoopVectorizationCostModel::computePredInstDiscount( 5633 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5634 unsigned VF) { 5635 assert(!isUniformAfterVectorization(PredInst, VF) && 5636 "Instruction marked uniform-after-vectorization will be predicated"); 5637 5638 // Initialize the discount to zero, meaning that the scalar version and the 5639 // vector version cost the same. 5640 int Discount = 0; 5641 5642 // Holds instructions to analyze. The instructions we visit are mapped in 5643 // ScalarCosts. Those instructions are the ones that would be scalarized if 5644 // we find that the scalar version costs less. 5645 SmallVector<Instruction *, 8> Worklist; 5646 5647 // Returns true if the given instruction can be scalarized. 5648 auto canBeScalarized = [&](Instruction *I) -> bool { 5649 // We only attempt to scalarize instructions forming a single-use chain 5650 // from the original predicated block that would otherwise be vectorized. 5651 // Although not strictly necessary, we give up on instructions we know will 5652 // already be scalar to avoid traversing chains that are unlikely to be 5653 // beneficial. 5654 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5655 isScalarAfterVectorization(I, VF)) 5656 return false; 5657 5658 // If the instruction is scalar with predication, it will be analyzed 5659 // separately. We ignore it within the context of PredInst. 5660 if (isScalarWithPredication(I)) 5661 return false; 5662 5663 // If any of the instruction's operands are uniform after vectorization, 5664 // the instruction cannot be scalarized. This prevents, for example, a 5665 // masked load from being scalarized. 
5666 // 5667 // We assume we will only emit a value for lane zero of an instruction 5668 // marked uniform after vectorization, rather than VF identical values. 5669 // Thus, if we scalarize an instruction that uses a uniform, we would 5670 // create uses of values corresponding to the lanes we aren't emitting code 5671 // for. This behavior can be changed by allowing getScalarValue to clone 5672 // the lane zero values for uniforms rather than asserting. 5673 for (Use &U : I->operands()) 5674 if (auto *J = dyn_cast<Instruction>(U.get())) 5675 if (isUniformAfterVectorization(J, VF)) 5676 return false; 5677 5678 // Otherwise, we can scalarize the instruction. 5679 return true; 5680 }; 5681 5682 // Compute the expected cost discount from scalarizing the entire expression 5683 // feeding the predicated instruction. We currently only consider expressions 5684 // that are single-use instruction chains. 5685 Worklist.push_back(PredInst); 5686 while (!Worklist.empty()) { 5687 Instruction *I = Worklist.pop_back_val(); 5688 5689 // If we've already analyzed the instruction, there's nothing to do. 5690 if (ScalarCosts.find(I) != ScalarCosts.end()) 5691 continue; 5692 5693 // Compute the cost of the vector instruction. Note that this cost already 5694 // includes the scalarization overhead of the predicated instruction. 5695 unsigned VectorCost = getInstructionCost(I, VF).first; 5696 5697 // Compute the cost of the scalarized instruction. This cost is the cost of 5698 // the instruction as if it wasn't if-converted and instead remained in the 5699 // predicated block. We will scale this cost by block probability after 5700 // computing the scalarization overhead. 5701 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5702 5703 // Compute the scalarization overhead of needed insertelement instructions 5704 // and phi nodes. 5705 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5706 ScalarCost += 5707 TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 5708 APInt::getAllOnesValue(VF), true, false); 5709 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5710 } 5711 5712 // Compute the scalarization overhead of needed extractelement 5713 // instructions. For each of the instruction's operands, if the operand can 5714 // be scalarized, add it to the worklist; otherwise, account for the 5715 // overhead. 5716 for (Use &U : I->operands()) 5717 if (auto *J = dyn_cast<Instruction>(U.get())) { 5718 assert(VectorType::isValidElementType(J->getType()) && 5719 "Instruction has non-scalar type"); 5720 if (canBeScalarized(J)) 5721 Worklist.push_back(J); 5722 else if (needsExtract(J, VF)) 5723 ScalarCost += TTI.getScalarizationOverhead( 5724 ToVectorTy(J->getType(), VF), APInt::getAllOnesValue(VF), false, 5725 true); 5726 } 5727 5728 // Scale the total scalar cost by block probability. 5729 ScalarCost /= getReciprocalPredBlockProb(); 5730 5731 // Compute the discount. A non-negative discount means the vector version 5732 // of the instruction costs more, and scalarizing would be beneficial. 5733 Discount += VectorCost - ScalarCost; 5734 ScalarCosts[I] = ScalarCost; 5735 } 5736 5737 return Discount; 5738 } 5739 5740 LoopVectorizationCostModel::VectorizationCostTy 5741 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5742 VectorizationCostTy Cost; 5743 5744 // For each block. 5745 for (BasicBlock *BB : TheLoop->blocks()) { 5746 VectorizationCostTy BlockCost; 5747 5748 // For each instruction in the old loop. 
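    // Note: each per-instruction estimate below can be overridden from the
    // command line via ForceTargetInstructionCost (typically spelled
    // -force-target-instruction-cost=<n>), which is convenient when isolating
    // the effect of the block-probability scaling applied further down.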
5749 for (Instruction &I : BB->instructionsWithoutDebug()) { 5750 // Skip ignored values. 5751 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5752 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5753 continue; 5754 5755 VectorizationCostTy C = getInstructionCost(&I, VF); 5756 5757 // Check if we should override the cost. 5758 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5759 C.first = ForceTargetInstructionCost; 5760 5761 BlockCost.first += C.first; 5762 BlockCost.second |= C.second; 5763 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5764 << " for VF " << VF << " For instruction: " << I 5765 << '\n'); 5766 } 5767 5768 // If we are vectorizing a predicated block, it will have been 5769 // if-converted. This means that the block's instructions (aside from 5770 // stores and instructions that may divide by zero) will now be 5771 // unconditionally executed. For the scalar case, we may not always execute 5772 // the predicated block. Thus, scale the block's cost by the probability of 5773 // executing it. 5774 if (VF == 1 && blockNeedsPredication(BB)) 5775 BlockCost.first /= getReciprocalPredBlockProb(); 5776 5777 Cost.first += BlockCost.first; 5778 Cost.second |= BlockCost.second; 5779 } 5780 5781 return Cost; 5782 } 5783 5784 /// Gets Address Access SCEV after verifying that the access pattern 5785 /// is loop invariant except the induction variable dependence. 5786 /// 5787 /// This SCEV can be sent to the Target in order to estimate the address 5788 /// calculation cost. 5789 static const SCEV *getAddressAccessSCEV( 5790 Value *Ptr, 5791 LoopVectorizationLegality *Legal, 5792 PredicatedScalarEvolution &PSE, 5793 const Loop *TheLoop) { 5794 5795 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5796 if (!Gep) 5797 return nullptr; 5798 5799 // We are looking for a gep with all loop invariant indices except for one 5800 // which should be an induction variable. 5801 auto SE = PSE.getSE(); 5802 unsigned NumOperands = Gep->getNumOperands(); 5803 for (unsigned i = 1; i < NumOperands; ++i) { 5804 Value *Opd = Gep->getOperand(i); 5805 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5806 !Legal->isInductionVariable(Opd)) 5807 return nullptr; 5808 } 5809 5810 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5811 return PSE.getSCEV(Ptr); 5812 } 5813 5814 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5815 return Legal->hasStride(I->getOperand(0)) || 5816 Legal->hasStride(I->getOperand(1)); 5817 } 5818 5819 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5820 unsigned VF) { 5821 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5822 Type *ValTy = getMemInstValueType(I); 5823 auto SE = PSE.getSE(); 5824 5825 unsigned AS = getLoadStoreAddressSpace(I); 5826 Value *Ptr = getLoadStorePointerOperand(I); 5827 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5828 5829 // Figure out whether the access is strided and get the stride value 5830 // if it's known in compile time 5831 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5832 5833 // Get the cost of the scalar memory instruction and address computation. 5834 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5835 5836 // Don't pass *I here, since it is scalar but will actually be part of a 5837 // vectorized loop where the user of it is a vectorized instruction. 
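  // Rough shape of the total computed here, e.g. for a scalarized load at
  // VF == 4: 4 * (address computation + scalar load) plus the
  // insert/extractelement overhead added below, divided by the block
  // probability if the access is predicated -- unless the emulated-masked-
  // memref hack replaces the result with a prohibitively large constant.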
5838 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5839 Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 5840 Alignment, AS); 5841 5842 // Get the overhead of the extractelement and insertelement instructions 5843 // we might create due to scalarization. 5844 Cost += getScalarizationOverhead(I, VF); 5845 5846 // If we have a predicated store, it may not be executed for each vector 5847 // lane. Scale the cost by the probability of executing the predicated 5848 // block. 5849 if (isPredicatedInst(I)) { 5850 Cost /= getReciprocalPredBlockProb(); 5851 5852 if (useEmulatedMaskMemRefHack(I)) 5853 // Artificially setting to a high enough value to practically disable 5854 // vectorization with such operations. 5855 Cost = 3000000; 5856 } 5857 5858 return Cost; 5859 } 5860 5861 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5862 unsigned VF) { 5863 Type *ValTy = getMemInstValueType(I); 5864 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5865 Value *Ptr = getLoadStorePointerOperand(I); 5866 unsigned AS = getLoadStoreAddressSpace(I); 5867 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5868 5869 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5870 "Stride should be 1 or -1 for consecutive memory access"); 5871 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5872 unsigned Cost = 0; 5873 if (Legal->isMaskRequired(I)) 5874 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, 5875 Alignment ? Alignment->value() : 0, AS); 5876 else 5877 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I); 5878 5879 bool Reverse = ConsecutiveStride < 0; 5880 if (Reverse) 5881 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5882 return Cost; 5883 } 5884 5885 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5886 unsigned VF) { 5887 Type *ValTy = getMemInstValueType(I); 5888 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5889 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5890 unsigned AS = getLoadStoreAddressSpace(I); 5891 if (isa<LoadInst>(I)) { 5892 return TTI.getAddressComputationCost(ValTy) + 5893 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) + 5894 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5895 } 5896 StoreInst *SI = cast<StoreInst>(I); 5897 5898 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5899 return TTI.getAddressComputationCost(ValTy) + 5900 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS) + 5901 (isLoopInvariantStoreValue 5902 ? 0 5903 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 5904 VF - 1)); 5905 } 5906 5907 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5908 unsigned VF) { 5909 Type *ValTy = getMemInstValueType(I); 5910 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5911 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5912 Value *Ptr = getLoadStorePointerOperand(I); 5913 5914 return TTI.getAddressComputationCost(VectorTy) + 5915 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5916 Legal->isMaskRequired(I), 5917 Alignment ? 
Alignment->value() : 0, I); 5918 } 5919 5920 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5921 unsigned VF) { 5922 Type *ValTy = getMemInstValueType(I); 5923 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5924 unsigned AS = getLoadStoreAddressSpace(I); 5925 5926 auto Group = getInterleavedAccessGroup(I); 5927 assert(Group && "Fail to get an interleaved access group."); 5928 5929 unsigned InterleaveFactor = Group->getFactor(); 5930 VectorType *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5931 5932 // Holds the indices of existing members in an interleaved load group. 5933 // An interleaved store group doesn't need this as it doesn't allow gaps. 5934 SmallVector<unsigned, 4> Indices; 5935 if (isa<LoadInst>(I)) { 5936 for (unsigned i = 0; i < InterleaveFactor; i++) 5937 if (Group->getMember(i)) 5938 Indices.push_back(i); 5939 } 5940 5941 // Calculate the cost of the whole interleaved group. 5942 bool UseMaskForGaps = 5943 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5944 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5945 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5946 Group->getAlign().value(), AS, Legal->isMaskRequired(I), UseMaskForGaps); 5947 5948 if (Group->isReverse()) { 5949 // TODO: Add support for reversed masked interleaved access. 5950 assert(!Legal->isMaskRequired(I) && 5951 "Reverse masked interleaved access not supported."); 5952 Cost += Group->getNumMembers() * 5953 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5954 } 5955 return Cost; 5956 } 5957 5958 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5959 unsigned VF) { 5960 // Calculate scalar cost only. Vectorization cost should be ready at this 5961 // moment. 5962 if (VF == 1) { 5963 Type *ValTy = getMemInstValueType(I); 5964 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5965 unsigned AS = getLoadStoreAddressSpace(I); 5966 5967 return TTI.getAddressComputationCost(ValTy) + 5968 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I); 5969 } 5970 return getWideningCost(I, VF); 5971 } 5972 5973 LoopVectorizationCostModel::VectorizationCostTy 5974 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5975 // If we know that this instruction will remain uniform, check the cost of 5976 // the scalar version. 5977 if (isUniformAfterVectorization(I, VF)) 5978 VF = 1; 5979 5980 if (VF > 1 && isProfitableToScalarize(I, VF)) 5981 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5982 5983 // Forced scalars do not have any scalarization overhead. 
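  // Their cost is modelled as VF copies of the scalar instruction (computed
  // just below); no insert/extractelement overhead is added for them.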
5984 auto ForcedScalar = ForcedScalars.find(VF); 5985 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5986 auto InstSet = ForcedScalar->second; 5987 if (InstSet.find(I) != InstSet.end()) 5988 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5989 } 5990 5991 Type *VectorTy; 5992 unsigned C = getInstructionCost(I, VF, VectorTy); 5993 5994 bool TypeNotScalarized = 5995 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 5996 return VectorizationCostTy(C, TypeNotScalarized); 5997 } 5998 5999 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6000 unsigned VF) { 6001 6002 if (VF == 1) 6003 return 0; 6004 6005 unsigned Cost = 0; 6006 Type *RetTy = ToVectorTy(I->getType(), VF); 6007 if (!RetTy->isVoidTy() && 6008 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6009 Cost += TTI.getScalarizationOverhead(RetTy, APInt::getAllOnesValue(VF), 6010 true, false); 6011 6012 // Some targets keep addresses scalar. 6013 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6014 return Cost; 6015 6016 // Some targets support efficient element stores. 6017 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6018 return Cost; 6019 6020 // Collect operands to consider. 6021 CallInst *CI = dyn_cast<CallInst>(I); 6022 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 6023 6024 // Skip operands that do not require extraction/scalarization and do not incur 6025 // any overhead. 6026 return Cost + TTI.getOperandsScalarizationOverhead( 6027 filterExtractingOperands(Ops, VF), VF); 6028 } 6029 6030 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 6031 if (VF == 1) 6032 return; 6033 NumPredStores = 0; 6034 for (BasicBlock *BB : TheLoop->blocks()) { 6035 // For each instruction in the old loop. 6036 for (Instruction &I : *BB) { 6037 Value *Ptr = getLoadStorePointerOperand(&I); 6038 if (!Ptr) 6039 continue; 6040 6041 // TODO: We should generate better code and update the cost model for 6042 // predicated uniform stores. Today they are treated as any other 6043 // predicated store (see added test cases in 6044 // invariant-store-vectorization.ll). 6045 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 6046 NumPredStores++; 6047 6048 if (Legal->isUniform(Ptr) && 6049 // Conditional loads and stores should be scalarized and predicated. 6050 // isScalarWithPredication cannot be used here since masked 6051 // gather/scatters are not considered scalar with predication. 6052 !Legal->blockNeedsPredication(I.getParent())) { 6053 // TODO: Avoid replicating loads and stores instead of 6054 // relying on instcombine to remove them. 6055 // Load: Scalar load + broadcast 6056 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6057 unsigned Cost = getUniformMemOpCost(&I, VF); 6058 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6059 continue; 6060 } 6061 6062 // We assume that widening is the best solution when possible. 6063 if (memoryInstructionCanBeWidened(&I, VF)) { 6064 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 6065 int ConsecutiveStride = 6066 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 6067 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6068 "Expected consecutive stride."); 6069 InstWidening Decision = 6070 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6071 setWideningDecision(&I, VF, Decision, Cost); 6072 continue; 6073 } 6074 6075 // Choose between Interleaving, Gather/Scatter or Scalarization. 
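      // Worked example with made-up costs: if InterleaveCost == 6,
      // GatherScatterCost == 8 and ScalarizationCost == 12, the access is
      // interleaved. Note the asymmetry below: interleaving wins ties against
      // gather/scatter (<=) but must be strictly cheaper than scalarization.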
6076 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 6077 unsigned NumAccesses = 1; 6078 if (isAccessInterleaved(&I)) { 6079 auto Group = getInterleavedAccessGroup(&I); 6080 assert(Group && "Fail to get an interleaved access group."); 6081 6082 // Make one decision for the whole group. 6083 if (getWideningDecision(&I, VF) != CM_Unknown) 6084 continue; 6085 6086 NumAccesses = Group->getNumMembers(); 6087 if (interleavedAccessCanBeWidened(&I, VF)) 6088 InterleaveCost = getInterleaveGroupCost(&I, VF); 6089 } 6090 6091 unsigned GatherScatterCost = 6092 isLegalGatherOrScatter(&I) 6093 ? getGatherScatterCost(&I, VF) * NumAccesses 6094 : std::numeric_limits<unsigned>::max(); 6095 6096 unsigned ScalarizationCost = 6097 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6098 6099 // Choose better solution for the current VF, 6100 // write down this decision and use it during vectorization. 6101 unsigned Cost; 6102 InstWidening Decision; 6103 if (InterleaveCost <= GatherScatterCost && 6104 InterleaveCost < ScalarizationCost) { 6105 Decision = CM_Interleave; 6106 Cost = InterleaveCost; 6107 } else if (GatherScatterCost < ScalarizationCost) { 6108 Decision = CM_GatherScatter; 6109 Cost = GatherScatterCost; 6110 } else { 6111 Decision = CM_Scalarize; 6112 Cost = ScalarizationCost; 6113 } 6114 // If the instructions belongs to an interleave group, the whole group 6115 // receives the same decision. The whole group receives the cost, but 6116 // the cost will actually be assigned to one instruction. 6117 if (auto Group = getInterleavedAccessGroup(&I)) 6118 setWideningDecision(Group, VF, Decision, Cost); 6119 else 6120 setWideningDecision(&I, VF, Decision, Cost); 6121 } 6122 } 6123 6124 // Make sure that any load of address and any other address computation 6125 // remains scalar unless there is gather/scatter support. This avoids 6126 // inevitable extracts into address registers, and also has the benefit of 6127 // activating LSR more, since that pass can't optimize vectorized 6128 // addresses. 6129 if (TTI.prefersVectorizedAddressing()) 6130 return; 6131 6132 // Start with all scalar pointer uses. 6133 SmallPtrSet<Instruction *, 8> AddrDefs; 6134 for (BasicBlock *BB : TheLoop->blocks()) 6135 for (Instruction &I : *BB) { 6136 Instruction *PtrDef = 6137 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 6138 if (PtrDef && TheLoop->contains(PtrDef) && 6139 getWideningDecision(&I, VF) != CM_GatherScatter) 6140 AddrDefs.insert(PtrDef); 6141 } 6142 6143 // Add all instructions used to generate the addresses. 6144 SmallVector<Instruction *, 4> Worklist; 6145 for (auto *I : AddrDefs) 6146 Worklist.push_back(I); 6147 while (!Worklist.empty()) { 6148 Instruction *I = Worklist.pop_back_val(); 6149 for (auto &Op : I->operands()) 6150 if (auto *InstOp = dyn_cast<Instruction>(Op)) 6151 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 6152 AddrDefs.insert(InstOp).second) 6153 Worklist.push_back(InstOp); 6154 } 6155 6156 for (auto *I : AddrDefs) { 6157 if (isa<LoadInst>(I)) { 6158 // Setting the desired widening decision should ideally be handled in 6159 // by cost functions, but since this involves the task of finding out 6160 // if the loaded register is involved in an address computation, it is 6161 // instead changed here when we know this is the case. 6162 InstWidening Decision = getWideningDecision(I, VF); 6163 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 6164 // Scalarize a widened load of address. 
6165 setWideningDecision(I, VF, CM_Scalarize, 6166 (VF * getMemoryInstructionCost(I, 1))); 6167 else if (auto Group = getInterleavedAccessGroup(I)) { 6168 // Scalarize an interleave group of address loads. 6169 for (unsigned I = 0; I < Group->getFactor(); ++I) { 6170 if (Instruction *Member = Group->getMember(I)) 6171 setWideningDecision(Member, VF, CM_Scalarize, 6172 (VF * getMemoryInstructionCost(Member, 1))); 6173 } 6174 } 6175 } else 6176 // Make sure I gets scalarized and a cost estimate without 6177 // scalarization overhead. 6178 ForcedScalars[VF].insert(I); 6179 } 6180 } 6181 6182 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6183 unsigned VF, 6184 Type *&VectorTy) { 6185 Type *RetTy = I->getType(); 6186 if (canTruncateToMinimalBitwidth(I, VF)) 6187 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6188 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 6189 auto SE = PSE.getSE(); 6190 6191 // TODO: We need to estimate the cost of intrinsic calls. 6192 switch (I->getOpcode()) { 6193 case Instruction::GetElementPtr: 6194 // We mark this instruction as zero-cost because the cost of GEPs in 6195 // vectorized code depends on whether the corresponding memory instruction 6196 // is scalarized or not. Therefore, we handle GEPs with the memory 6197 // instruction cost. 6198 return 0; 6199 case Instruction::Br: { 6200 // In cases of scalarized and predicated instructions, there will be VF 6201 // predicated blocks in the vectorized loop. Each branch around these 6202 // blocks requires also an extract of its vector compare i1 element. 6203 bool ScalarPredicatedBB = false; 6204 BranchInst *BI = cast<BranchInst>(I); 6205 if (VF > 1 && BI->isConditional() && 6206 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 6207 PredicatedBBsAfterVectorization.end() || 6208 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 6209 PredicatedBBsAfterVectorization.end())) 6210 ScalarPredicatedBB = true; 6211 6212 if (ScalarPredicatedBB) { 6213 // Return cost for branches around scalarized and predicated blocks. 6214 Type *Vec_i1Ty = 6215 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 6216 return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF), 6217 false, true) + 6218 (TTI.getCFInstrCost(Instruction::Br) * VF)); 6219 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 6220 // The back-edge branch will remain, as will all scalar branches. 6221 return TTI.getCFInstrCost(Instruction::Br); 6222 else 6223 // This branch will be eliminated by if-conversion. 6224 return 0; 6225 // Note: We currently assume zero cost for an unconditional branch inside 6226 // a predicated block since it will become a fall-through, although we 6227 // may decide in the future to call TTI for all branches. 6228 } 6229 case Instruction::PHI: { 6230 auto *Phi = cast<PHINode>(I); 6231 6232 // First-order recurrences are replaced by vector shuffles inside the loop. 6233 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 6234 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6235 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6236 cast<VectorType>(VectorTy), VF - 1, 6237 VectorType::get(RetTy, 1)); 6238 6239 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 6240 // converted into select instructions. We require N - 1 selects per phi 6241 // node, where N is the number of incoming values. 
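    // For instance, a phi merging values from three predecessors lowers to a
    // chain of two selects, roughly (names purely illustrative):
    //   %s1 = select %mask2, %v2, %v1
    //   %s2 = select %mask3, %v3, %s1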
6242 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 6243 return (Phi->getNumIncomingValues() - 1) * 6244 TTI.getCmpSelInstrCost( 6245 Instruction::Select, ToVectorTy(Phi->getType(), VF), 6246 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)); 6247 6248 return TTI.getCFInstrCost(Instruction::PHI); 6249 } 6250 case Instruction::UDiv: 6251 case Instruction::SDiv: 6252 case Instruction::URem: 6253 case Instruction::SRem: 6254 // If we have a predicated instruction, it may not be executed for each 6255 // vector lane. Get the scalarization cost and scale this amount by the 6256 // probability of executing the predicated block. If the instruction is not 6257 // predicated, we fall through to the next case. 6258 if (VF > 1 && isScalarWithPredication(I)) { 6259 unsigned Cost = 0; 6260 6261 // These instructions have a non-void type, so account for the phi nodes 6262 // that we will create. This cost is likely to be zero. The phi node 6263 // cost, if any, should be scaled by the block probability because it 6264 // models a copy at the end of each predicated block. 6265 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 6266 6267 // The cost of the non-predicated instruction. 6268 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy); 6269 6270 // The cost of insertelement and extractelement instructions needed for 6271 // scalarization. 6272 Cost += getScalarizationOverhead(I, VF); 6273 6274 // Scale the cost by the probability of executing the predicated blocks. 6275 // This assumes the predicated block for each vector lane is equally 6276 // likely. 6277 return Cost / getReciprocalPredBlockProb(); 6278 } 6279 LLVM_FALLTHROUGH; 6280 case Instruction::Add: 6281 case Instruction::FAdd: 6282 case Instruction::Sub: 6283 case Instruction::FSub: 6284 case Instruction::Mul: 6285 case Instruction::FMul: 6286 case Instruction::FDiv: 6287 case Instruction::FRem: 6288 case Instruction::Shl: 6289 case Instruction::LShr: 6290 case Instruction::AShr: 6291 case Instruction::And: 6292 case Instruction::Or: 6293 case Instruction::Xor: { 6294 // Since we will replace the stride by 1 the multiplication should go away. 6295 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6296 return 0; 6297 // Certain instructions can be cheaper to vectorize if they have a constant 6298 // second vector operand. One example of this are shifts on x86. 6299 Value *Op2 = I->getOperand(1); 6300 TargetTransformInfo::OperandValueProperties Op2VP; 6301 TargetTransformInfo::OperandValueKind Op2VK = 6302 TTI.getOperandInfo(Op2, Op2VP); 6303 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 6304 Op2VK = TargetTransformInfo::OK_UniformValue; 6305 6306 SmallVector<const Value *, 4> Operands(I->operand_values()); 6307 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6308 return N * TTI.getArithmeticInstrCost( 6309 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 6310 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 6311 } 6312 case Instruction::FNeg: { 6313 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6314 return N * TTI.getArithmeticInstrCost( 6315 I->getOpcode(), VectorTy, TargetTransformInfo::OK_AnyValue, 6316 TargetTransformInfo::OK_AnyValue, 6317 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 6318 I->getOperand(0), I); 6319 } 6320 case Instruction::Select: { 6321 SelectInst *SI = cast<SelectInst>(I); 6322 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6323 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6324 Type *CondTy = SI->getCondition()->getType(); 6325 if (!ScalarCond) 6326 CondTy = VectorType::get(CondTy, VF); 6327 6328 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I); 6329 } 6330 case Instruction::ICmp: 6331 case Instruction::FCmp: { 6332 Type *ValTy = I->getOperand(0)->getType(); 6333 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6334 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 6335 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 6336 VectorTy = ToVectorTy(ValTy, VF); 6337 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I); 6338 } 6339 case Instruction::Store: 6340 case Instruction::Load: { 6341 unsigned Width = VF; 6342 if (Width > 1) { 6343 InstWidening Decision = getWideningDecision(I, Width); 6344 assert(Decision != CM_Unknown && 6345 "CM decision should be taken at this point"); 6346 if (Decision == CM_Scalarize) 6347 Width = 1; 6348 } 6349 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 6350 return getMemoryInstructionCost(I, VF); 6351 } 6352 case Instruction::ZExt: 6353 case Instruction::SExt: 6354 case Instruction::FPToUI: 6355 case Instruction::FPToSI: 6356 case Instruction::FPExt: 6357 case Instruction::PtrToInt: 6358 case Instruction::IntToPtr: 6359 case Instruction::SIToFP: 6360 case Instruction::UIToFP: 6361 case Instruction::Trunc: 6362 case Instruction::FPTrunc: 6363 case Instruction::BitCast: { 6364 // We optimize the truncation of induction variables having constant 6365 // integer steps. The cost of these truncations is the same as the scalar 6366 // operation. 6367 if (isOptimizableIVTruncate(I, VF)) { 6368 auto *Trunc = cast<TruncInst>(I); 6369 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 6370 Trunc->getSrcTy(), Trunc); 6371 } 6372 6373 Type *SrcScalarTy = I->getOperand(0)->getType(); 6374 Type *SrcVecTy = 6375 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6376 if (canTruncateToMinimalBitwidth(I, VF)) { 6377 // This cast is going to be shrunk. This may remove the cast or it might 6378 // turn it into slightly different cast. For example, if MinBW == 16, 6379 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6380 // 6381 // Calculate the modified src and dest types. 6382 Type *MinVecTy = VectorTy; 6383 if (I->getOpcode() == Instruction::Trunc) { 6384 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6385 VectorTy = 6386 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6387 } else if (I->getOpcode() == Instruction::ZExt || 6388 I->getOpcode() == Instruction::SExt) { 6389 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6390 VectorTy = 6391 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6392 } 6393 } 6394 6395 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6396 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I); 6397 } 6398 case Instruction::Call: { 6399 bool NeedToScalarize; 6400 CallInst *CI = cast<CallInst>(I); 6401 unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 6402 if (getVectorIntrinsicIDForCall(CI, TLI)) 6403 return std::min(CallCost, getVectorIntrinsicCost(CI, VF)); 6404 return CallCost; 6405 } 6406 default: 6407 // The cost of executing VF copies of the scalar instruction. This opcode 6408 // is unknown. Assume that it is the same as 'mul'. 6409 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) + 6410 getScalarizationOverhead(I, VF); 6411 } // end of switch. 6412 } 6413 6414 char LoopVectorize::ID = 0; 6415 6416 static const char lv_name[] = "Loop Vectorization"; 6417 6418 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6419 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6420 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6421 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6422 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6423 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6424 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6425 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6426 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6427 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6428 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6429 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6430 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6431 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 6432 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 6433 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6434 6435 namespace llvm { 6436 6437 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 6438 6439 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6440 bool VectorizeOnlyWhenForced) { 6441 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6442 } 6443 6444 } // end namespace llvm 6445 6446 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6447 // Check if the pointer operand of a load or store instruction is 6448 // consecutive. 6449 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6450 return Legal->isConsecutivePtr(Ptr); 6451 return false; 6452 } 6453 6454 void LoopVectorizationCostModel::collectValuesToIgnore() { 6455 // Ignore ephemeral values. 6456 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6457 6458 // Ignore type-promoting instructions we identified during reduction 6459 // detection. 6460 for (auto &Reduction : Legal->getReductionVars()) { 6461 RecurrenceDescriptor &RedDes = Reduction.second; 6462 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6463 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6464 } 6465 // Ignore type-casting instructions we identified during induction 6466 // detection. 6467 for (auto &Induction : Legal->getInductionVars()) { 6468 InductionDescriptor &IndDes = Induction.second; 6469 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6470 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6471 } 6472 } 6473 6474 // TODO: we could return a pair of values that specify the max VF and 6475 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 6476 // `buildVPlans(VF, VF)`. 
We cannot do it because VPLAN at the moment 6477 // doesn't have a cost model that can choose which plan to execute if 6478 // more than one is generated. 6479 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 6480 LoopVectorizationCostModel &CM) { 6481 unsigned WidestType; 6482 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 6483 return WidestVectorRegBits / WidestType; 6484 } 6485 6486 VectorizationFactor 6487 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) { 6488 unsigned VF = UserVF; 6489 // Outer loop handling: They may require CFG and instruction level 6490 // transformations before even evaluating whether vectorization is profitable. 6491 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6492 // the vectorization pipeline. 6493 if (!OrigLoop->empty()) { 6494 // If the user doesn't provide a vectorization factor, determine a 6495 // reasonable one. 6496 if (!UserVF) { 6497 VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM); 6498 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 6499 6500 // Make sure we have a VF > 1 for stress testing. 6501 if (VPlanBuildStressTest && VF < 2) { 6502 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 6503 << "overriding computed VF.\n"); 6504 VF = 4; 6505 } 6506 } 6507 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6508 assert(isPowerOf2_32(VF) && "VF needs to be a power of two"); 6509 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF 6510 << " to build VPlans.\n"); 6511 buildVPlans(VF, VF); 6512 6513 // For VPlan build stress testing, we bail out after VPlan construction. 6514 if (VPlanBuildStressTest) 6515 return VectorizationFactor::Disabled(); 6516 6517 return {VF, 0}; 6518 } 6519 6520 LLVM_DEBUG( 6521 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6522 "VPlan-native path.\n"); 6523 return VectorizationFactor::Disabled(); 6524 } 6525 6526 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) { 6527 assert(OrigLoop->empty() && "Inner loop expected."); 6528 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(); 6529 if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved. 6530 return None; 6531 6532 // Invalidate interleave groups if all blocks of loop will be predicated. 6533 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6534 !useMaskedInterleavedAccesses(*TTI)) { 6535 LLVM_DEBUG( 6536 dbgs() 6537 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6538 "which requires masked-interleaved support.\n"); 6539 if (CM.InterleaveInfo.invalidateGroups()) 6540 // Invalidating interleave groups also requires invalidating all decisions 6541 // based on them, which includes widening decisions and uniform and scalar 6542 // values. 6543 CM.invalidateCostModelingDecisions(); 6544 } 6545 6546 if (UserVF) { 6547 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6548 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6549 // Collect the instructions (and their associated costs) that will be more 6550 // profitable to scalarize. 
6551 CM.selectUserVectorizationFactor(UserVF); 6552 buildVPlansWithVPRecipes(UserVF, UserVF); 6553 LLVM_DEBUG(printPlans(dbgs())); 6554 return {{UserVF, 0}}; 6555 } 6556 6557 unsigned MaxVF = MaybeMaxVF.getValue(); 6558 assert(MaxVF != 0 && "MaxVF is zero."); 6559 6560 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6561 // Collect Uniform and Scalar instructions after vectorization with VF. 6562 CM.collectUniformsAndScalars(VF); 6563 6564 // Collect the instructions (and their associated costs) that will be more 6565 // profitable to scalarize. 6566 if (VF > 1) 6567 CM.collectInstsToScalarize(VF); 6568 } 6569 6570 buildVPlansWithVPRecipes(1, MaxVF); 6571 LLVM_DEBUG(printPlans(dbgs())); 6572 if (MaxVF == 1) 6573 return VectorizationFactor::Disabled(); 6574 6575 // Select the optimal vectorization factor. 6576 return CM.selectVectorizationFactor(MaxVF); 6577 } 6578 6579 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6580 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6581 << '\n'); 6582 BestVF = VF; 6583 BestUF = UF; 6584 6585 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6586 return !Plan->hasVF(VF); 6587 }); 6588 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6589 } 6590 6591 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6592 DominatorTree *DT) { 6593 // Perform the actual loop transformation. 6594 6595 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6596 VPCallbackILV CallbackILV(ILV); 6597 6598 VPTransformState State{BestVF, BestUF, LI, 6599 DT, ILV.Builder, ILV.VectorLoopValueMap, 6600 &ILV, CallbackILV}; 6601 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6602 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6603 State.CanonicalIV = ILV.Induction; 6604 6605 //===------------------------------------------------===// 6606 // 6607 // Notice: any optimization or new instruction that go 6608 // into the code below should also be implemented in 6609 // the cost-model. 6610 // 6611 //===------------------------------------------------===// 6612 6613 // 2. Copy and widen instructions from the old loop into the new loop. 6614 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6615 VPlans.front()->execute(&State); 6616 6617 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6618 // predication, updating analyses. 6619 ILV.fixVectorizedLoop(); 6620 } 6621 6622 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6623 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6624 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6625 6626 // We create new control-flow for the vectorized loop, so the original 6627 // condition will be dead after vectorization if it's only used by the 6628 // branch. 6629 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6630 if (Cmp && Cmp->hasOneUse()) 6631 DeadInstructions.insert(Cmp); 6632 6633 // We create new "steps" for induction variable updates to which the original 6634 // induction variables map. An original update instruction will be dead if 6635 // all its users except the induction variable are dead. 
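  // Hypothetical example: for a canonical induction whose update is
  //   %iv.next = add nuw i64 %iv, 1
  // and whose only users are the phi %iv and the latch compare collected
  // above, %iv.next is recorded as dead here; the vectorized loop emits its
  // own widened induction steps instead.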
6636 for (auto &Induction : Legal->getInductionVars()) { 6637 PHINode *Ind = Induction.first; 6638 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6639 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6640 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6641 DeadInstructions.end(); 6642 })) 6643 DeadInstructions.insert(IndUpdate); 6644 6645 // We record as "Dead" also the type-casting instructions we had identified 6646 // during induction analysis. We don't need any handling for them in the 6647 // vectorized loop because we have proven that, under a proper runtime 6648 // test guarding the vectorized loop, the value of the phi, and the casted 6649 // value of the phi, are the same. The last instruction in this casting chain 6650 // will get its scalar/vector/widened def from the scalar/vector/widened def 6651 // of the respective phi node. Any other casts in the induction def-use chain 6652 // have no other uses outside the phi update chain, and will be ignored. 6653 InductionDescriptor &IndDes = Induction.second; 6654 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6655 DeadInstructions.insert(Casts.begin(), Casts.end()); 6656 } 6657 } 6658 6659 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6660 6661 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6662 6663 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6664 Instruction::BinaryOps BinOp) { 6665 // When unrolling and the VF is 1, we only need to add a simple scalar. 6666 Type *Ty = Val->getType(); 6667 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6668 6669 if (Ty->isFloatingPointTy()) { 6670 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6671 6672 // Floating point operations had to be 'fast' to enable the unrolling. 6673 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6674 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6675 } 6676 Constant *C = ConstantInt::get(Ty, StartIdx); 6677 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6678 } 6679 6680 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6681 SmallVector<Metadata *, 4> MDs; 6682 // Reserve first location for self reference to the LoopID metadata node. 6683 MDs.push_back(nullptr); 6684 bool IsUnrollMetadata = false; 6685 MDNode *LoopID = L->getLoopID(); 6686 if (LoopID) { 6687 // First find existing loop unrolling disable metadata. 6688 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6689 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6690 if (MD) { 6691 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6692 IsUnrollMetadata = 6693 S && S->getString().startswith("llvm.loop.unroll.disable"); 6694 } 6695 MDs.push_back(LoopID->getOperand(i)); 6696 } 6697 } 6698 6699 if (!IsUnrollMetadata) { 6700 // Add runtime unroll disable metadata. 6701 LLVMContext &Context = L->getHeader()->getContext(); 6702 SmallVector<Metadata *, 1> DisableOperands; 6703 DisableOperands.push_back( 6704 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6705 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6706 MDs.push_back(DisableNode); 6707 MDNode *NewLoopID = MDNode::get(Context, MDs); 6708 // Set operand 0 to refer to the loop id itself. 
6709 NewLoopID->replaceOperandWith(0, NewLoopID); 6710 L->setLoopID(NewLoopID); 6711 } 6712 } 6713 6714 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6715 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6716 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6717 bool PredicateAtRangeStart = Predicate(Range.Start); 6718 6719 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6720 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6721 Range.End = TmpVF; 6722 break; 6723 } 6724 6725 return PredicateAtRangeStart; 6726 } 6727 6728 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6729 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6730 /// of VF's starting at a given VF and extending it as much as possible. Each 6731 /// vectorization decision can potentially shorten this sub-range during 6732 /// buildVPlan(). 6733 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6734 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6735 VFRange SubRange = {VF, MaxVF + 1}; 6736 VPlans.push_back(buildVPlan(SubRange)); 6737 VF = SubRange.End; 6738 } 6739 } 6740 6741 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6742 VPlanPtr &Plan) { 6743 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6744 6745 // Look for cached value. 6746 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6747 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6748 if (ECEntryIt != EdgeMaskCache.end()) 6749 return ECEntryIt->second; 6750 6751 VPValue *SrcMask = createBlockInMask(Src, Plan); 6752 6753 // The terminator has to be a branch inst! 6754 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6755 assert(BI && "Unexpected terminator found"); 6756 6757 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 6758 return EdgeMaskCache[Edge] = SrcMask; 6759 6760 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6761 assert(EdgeMask && "No Edge Mask found for condition"); 6762 6763 if (BI->getSuccessor(0) != Dst) 6764 EdgeMask = Builder.createNot(EdgeMask); 6765 6766 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6767 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6768 6769 return EdgeMaskCache[Edge] = EdgeMask; 6770 } 6771 6772 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6773 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6774 6775 // Look for cached value. 6776 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6777 if (BCEntryIt != BlockMaskCache.end()) 6778 return BCEntryIt->second; 6779 6780 // All-one mask is modelled as no-mask following the convention for masked 6781 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6782 VPValue *BlockMask = nullptr; 6783 6784 if (OrigLoop->getHeader() == BB) { 6785 if (!CM.blockNeedsPredication(BB)) 6786 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6787 6788 // Introduce the early-exit compare IV <= BTC to form header block mask. 6789 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6790 // Start by constructing the desired canonical IV. 
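    // Illustration: when folding the tail of a loop with trip count 13 at
    // VF == 4, BTC is 12; in the last vector iteration the lanes of IV are
    // <12, 13, 14, 15>, so ICmpULE(IV, BTC) produces <1, 0, 0, 0> and masks
    // off the three out-of-bounds lanes.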
6791 VPValue *IV = nullptr; 6792 if (Legal->getPrimaryInduction()) 6793 IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6794 else { 6795 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 6796 Builder.getInsertBlock()->appendRecipe(IVRecipe); 6797 IV = IVRecipe->getVPValue(); 6798 } 6799 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6800 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6801 return BlockMaskCache[BB] = BlockMask; 6802 } 6803 6804 // This is the block mask. We OR all incoming edges. 6805 for (auto *Predecessor : predecessors(BB)) { 6806 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6807 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6808 return BlockMaskCache[BB] = EdgeMask; 6809 6810 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6811 BlockMask = EdgeMask; 6812 continue; 6813 } 6814 6815 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6816 } 6817 6818 return BlockMaskCache[BB] = BlockMask; 6819 } 6820 6821 VPWidenMemoryInstructionRecipe * 6822 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6823 VPlanPtr &Plan) { 6824 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 6825 "Must be called with either a load or store"); 6826 6827 auto willWiden = [&](unsigned VF) -> bool { 6828 if (VF == 1) 6829 return false; 6830 LoopVectorizationCostModel::InstWidening Decision = 6831 CM.getWideningDecision(I, VF); 6832 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6833 "CM decision should be taken at this point."); 6834 if (Decision == LoopVectorizationCostModel::CM_Interleave) 6835 return true; 6836 if (CM.isScalarAfterVectorization(I, VF) || 6837 CM.isProfitableToScalarize(I, VF)) 6838 return false; 6839 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6840 }; 6841 6842 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6843 return nullptr; 6844 6845 VPValue *Mask = nullptr; 6846 if (Legal->isMaskRequired(I)) 6847 Mask = createBlockInMask(I->getParent(), Plan); 6848 6849 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 6850 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 6851 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 6852 6853 StoreInst *Store = cast<StoreInst>(I); 6854 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 6855 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 6856 } 6857 6858 VPWidenIntOrFpInductionRecipe * 6859 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const { 6860 // Check if this is an integer or fp induction. If so, build the recipe that 6861 // produces its scalar and vector values. 6862 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 6863 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6864 II.getKind() == InductionDescriptor::IK_FpInduction) 6865 return new VPWidenIntOrFpInductionRecipe(Phi); 6866 6867 return nullptr; 6868 } 6869 6870 VPWidenIntOrFpInductionRecipe * 6871 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, 6872 VFRange &Range) const { 6873 // Optimize the special case where the source is a constant integer 6874 // induction variable. Notice that we can only optimize the 'trunc' case 6875 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6876 // (c) other casts depend on pointer size. 6877 6878 // Determine whether \p K is a truncation based on an induction variable that 6879 // can be optimized. 
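  // A typical candidate, in illustrative IR: %t = trunc i64 %iv to i32, where
  // %iv is an induction with a constant step; rather than widening %iv to
  // <VF x i64> and truncating, the vectorizer can generate the narrower i32
  // induction directly.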
6880 auto isOptimizableIVTruncate = 6881 [&](Instruction *K) -> std::function<bool(unsigned)> { 6882 return 6883 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6884 }; 6885 6886 if (LoopVectorizationPlanner::getDecisionAndClampRange( 6887 isOptimizableIVTruncate(I), Range)) 6888 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6889 I); 6890 return nullptr; 6891 } 6892 6893 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) { 6894 // We know that all PHIs in non-header blocks are converted into selects, so 6895 // we don't have to worry about the insertion order and we can just use the 6896 // builder. At this point we generate the predication tree. There may be 6897 // duplications since this is a simple recursive scan, but future 6898 // optimizations will clean it up. 6899 6900 SmallVector<VPValue *, 2> Operands; 6901 unsigned NumIncoming = Phi->getNumIncomingValues(); 6902 for (unsigned In = 0; In < NumIncoming; In++) { 6903 VPValue *EdgeMask = 6904 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6905 assert((EdgeMask || NumIncoming == 1) && 6906 "Multiple predecessors with one having a full mask"); 6907 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In))); 6908 if (EdgeMask) 6909 Operands.push_back(EdgeMask); 6910 } 6911 return new VPBlendRecipe(Phi, Operands); 6912 } 6913 6914 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range, 6915 VPlan &Plan) const { 6916 6917 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6918 [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); }, 6919 Range); 6920 6921 if (IsPredicated) 6922 return nullptr; 6923 6924 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6925 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6926 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6927 return nullptr; 6928 6929 auto willWiden = [&](unsigned VF) -> bool { 6930 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6931 // The following case may be scalarized depending on the VF. 6932 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6933 // version of the instruction. 6934 // Is it beneficial to perform intrinsic call compared to lib call? 6935 bool NeedToScalarize = false; 6936 unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 6937 bool UseVectorIntrinsic = 6938 ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost; 6939 return UseVectorIntrinsic || !NeedToScalarize; 6940 }; 6941 6942 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6943 return nullptr; 6944 6945 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 6946 } 6947 6948 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 6949 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 6950 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 6951 // Instruction should be widened, unless it is scalar after vectorization, 6952 // scalarization is profitable or it is predicated. 
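  // Example of the range clamping below: if WillScalarize is false at VF == 4
  // but becomes true at VF == 8, a range of {4, 16} is clamped to end at 8 and
  // shouldWiden returns true, i.e. the instruction is widened only for the
  // remaining sub-range.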
6953 auto WillScalarize = [this, I](unsigned VF) -> bool { 6954 return CM.isScalarAfterVectorization(I, VF) || 6955 CM.isProfitableToScalarize(I, VF) || 6956 CM.isScalarWithPredication(I, VF); 6957 }; 6958 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 6959 Range); 6960 } 6961 6962 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 6963 auto IsVectorizableOpcode = [](unsigned Opcode) { 6964 switch (Opcode) { 6965 case Instruction::Add: 6966 case Instruction::And: 6967 case Instruction::AShr: 6968 case Instruction::BitCast: 6969 case Instruction::FAdd: 6970 case Instruction::FCmp: 6971 case Instruction::FDiv: 6972 case Instruction::FMul: 6973 case Instruction::FNeg: 6974 case Instruction::FPExt: 6975 case Instruction::FPToSI: 6976 case Instruction::FPToUI: 6977 case Instruction::FPTrunc: 6978 case Instruction::FRem: 6979 case Instruction::FSub: 6980 case Instruction::ICmp: 6981 case Instruction::IntToPtr: 6982 case Instruction::LShr: 6983 case Instruction::Mul: 6984 case Instruction::Or: 6985 case Instruction::PtrToInt: 6986 case Instruction::SDiv: 6987 case Instruction::Select: 6988 case Instruction::SExt: 6989 case Instruction::Shl: 6990 case Instruction::SIToFP: 6991 case Instruction::SRem: 6992 case Instruction::Sub: 6993 case Instruction::Trunc: 6994 case Instruction::UDiv: 6995 case Instruction::UIToFP: 6996 case Instruction::URem: 6997 case Instruction::Xor: 6998 case Instruction::ZExt: 6999 return true; 7000 } 7001 return false; 7002 }; 7003 7004 if (!IsVectorizableOpcode(I->getOpcode())) 7005 return nullptr; 7006 7007 // Success: widen this instruction. 7008 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 7009 } 7010 7011 VPBasicBlock *VPRecipeBuilder::handleReplication( 7012 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 7013 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 7014 VPlanPtr &Plan) { 7015 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 7016 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 7017 Range); 7018 7019 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 7020 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 7021 7022 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 7023 setRecipe(I, Recipe); 7024 7025 // Find if I uses a predicated instruction. If so, it will use its scalar 7026 // value. Avoid hoisting the insert-element which packs the scalar value into 7027 // a vector value, as that happens iff all users use the vector value. 7028 for (auto &Op : I->operands()) 7029 if (auto *PredInst = dyn_cast<Instruction>(Op)) 7030 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 7031 PredInst2Recipe[PredInst]->setAlsoPack(false); 7032 7033 // Finalize the recipe for Instr, first if it is not predicated. 7034 if (!IsPredicated) { 7035 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 7036 VPBB->appendRecipe(Recipe); 7037 return VPBB; 7038 } 7039 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 7040 assert(VPBB->getSuccessors().empty() && 7041 "VPBB has successors when handling predicated replication."); 7042 // Record predicated instructions for above packing optimizations. 
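  // Later replicated users that consume I's scalar value look it up in this
  // map and clear its AlsoPack flag (see the operand scan above).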
7043   PredInst2Recipe[I] = Recipe;
7044   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7045   VPBlockUtils::insertBlockAfter(Region, VPBB);
7046   auto *RegSucc = new VPBasicBlock();
7047   VPBlockUtils::insertBlockAfter(RegSucc, Region);
7048   return RegSucc;
7049 }
7050
7051 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7052                                                        VPRecipeBase *PredRecipe,
7053                                                        VPlanPtr &Plan) {
7054   // Instructions marked for predication are replicated and placed under an
7055   // if-then construct to prevent side-effects.
7056
7057   // Generate recipes to compute the block mask for this region.
7058   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7059
7060   // Build the triangular if-then region.
7061   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7062   assert(Instr->getParent() && "Predicated instruction not in any basic block");
7063   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7064   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7065   auto *PHIRecipe =
7066       Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7067   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7068   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7069   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7070
7071   // Note: first set Entry as region entry and then connect successors starting
7072   // from it in order, to propagate the "parent" of each VPBasicBlock.
7073   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7074   VPBlockUtils::connectBlocks(Pred, Exit);
7075
7076   return Region;
7077 }
7078
7079 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7080                                                        VFRange &Range,
7081                                                        VPlanPtr &Plan) {
7082   // First, check for specific widening recipes that deal with calls, memory
7083   // operations, inductions and Phi nodes.
7084   if (auto *CI = dyn_cast<CallInst>(Instr))
7085     return tryToWidenCall(CI, Range, *Plan);
7086
7087   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7088     return tryToWidenMemory(Instr, Range, Plan);
7089
7090   VPRecipeBase *Recipe;
7091   if (auto Phi = dyn_cast<PHINode>(Instr)) {
7092     if (Phi->getParent() != OrigLoop->getHeader())
7093       return tryToBlend(Phi, Plan);
7094     if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7095       return Recipe;
7096     return new VPWidenPHIRecipe(Phi);
7098   }
7099
7100   if (isa<TruncInst>(Instr) &&
7101       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7102     return Recipe;
7103
7104   if (!shouldWiden(Instr, Range))
7105     return nullptr;
7106
7107   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7108     return new VPWidenGEPRecipe(GEP, OrigLoop);
7109
7110   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7111     bool InvariantCond =
7112         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7113     return new VPWidenSelectRecipe(*SI, InvariantCond);
7114   }
7115
7116   return tryToWiden(Instr, *Plan);
7117 }
7118
7119 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7120                                                         unsigned MaxVF) {
7121   assert(OrigLoop->empty() && "Inner loop expected.");
7122
7123   // Collect conditions feeding internal conditional branches; they need to be
7124   // represented in VPlan for it to model masking.
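  // Each such condition is given a VPValue wrapping its IR def (via NeedDef
  // below), which the edge masks created during recipe construction refer to.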
7125   SmallPtrSet<Value *, 1> NeedDef;
7126
7127   auto *Latch = OrigLoop->getLoopLatch();
7128   for (BasicBlock *BB : OrigLoop->blocks()) {
7129     if (BB == Latch)
7130       continue;
7131     BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7132     if (Branch && Branch->isConditional())
7133       NeedDef.insert(Branch->getCondition());
7134   }
7135
7136   // If the tail is to be folded by masking, the primary induction variable, if
7137   // it exists, needs to be represented in VPlan for it to model early-exit masking.
7138   // Also, both the Phi and the live-out instruction of each reduction are
7139   // required in order to introduce a select between them in VPlan.
7140   if (CM.foldTailByMasking()) {
7141     if (Legal->getPrimaryInduction())
7142       NeedDef.insert(Legal->getPrimaryInduction());
7143     for (auto &Reduction : Legal->getReductionVars()) {
7144       NeedDef.insert(Reduction.first);
7145       NeedDef.insert(Reduction.second.getLoopExitInstr());
7146     }
7147   }
7148
7149   // Collect instructions from the original loop that will become trivially dead
7150   // in the vectorized loop. We don't need to vectorize these instructions. For
7151   // example, original induction update instructions can become dead because we
7152   // separately emit induction "steps" when generating code for the new loop.
7153   // Similarly, we create a new latch condition when setting up the structure
7154   // of the new loop, so the old one can become dead.
7155   SmallPtrSet<Instruction *, 4> DeadInstructions;
7156   collectTriviallyDeadInstructions(DeadInstructions);
7157
7158   // Add assume instructions we need to drop to DeadInstructions, to prevent
7159   // them from being added to the VPlan.
7160   // TODO: We only need to drop assumes in blocks that get flattened. If the
7161   // control flow is preserved, we should keep them.
7162   auto &ConditionalAssumes = Legal->getConditionalAssumes();
7163   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7164
7165   DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7166   // Dead instructions do not need sinking. Remove them from SinkAfter.
7167   for (Instruction *I : DeadInstructions)
7168     SinkAfter.erase(I);
7169
7170   for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7171     VFRange SubRange = {VF, MaxVF + 1};
7172     VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7173                                              DeadInstructions, SinkAfter));
7174     VF = SubRange.End;
7175   }
7176 }
7177
7178 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7179     VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7180     SmallPtrSetImpl<Instruction *> &DeadInstructions,
7181     const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7182
7183   // Hold a mapping from predicated instructions to their recipes, in order to
7184   // fix their AlsoPack behavior if a user is determined to replicate and use a
7185   // scalar instead of vector value.
7186   DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7187
7188   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7189
7190   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7191
7192   // ---------------------------------------------------------------------------
7193   // Pre-construction: record ingredients whose recipes we'll need to further
7194   // process after constructing the initial VPlan.
7195   // ---------------------------------------------------------------------------
7196
7197   // Mark instructions we'll need to sink later and their targets as
7198   // ingredients whose recipe we'll need to record.
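  // Recording an ingredient lets us map it back to the recipe created for it,
  // via RecipeBuilder.getRecipe(), once the initial VPlan has been built.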
7199   for (auto &Entry : SinkAfter) {
7200     RecipeBuilder.recordRecipeOf(Entry.first);
7201     RecipeBuilder.recordRecipeOf(Entry.second);
7202   }
7203
7204   // For each interleave group which is relevant for this (possibly trimmed)
7205   // Range, add it to the set of groups to be later applied to the VPlan and add
7206   // placeholders for its members' Recipes which we'll be replacing with a
7207   // single VPInterleaveRecipe.
7208   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7209     auto applyIG = [IG, this](unsigned VF) -> bool {
7210       return (VF >= 2 && // Query is illegal for VF == 1
7211               CM.getWideningDecision(IG->getInsertPos(), VF) ==
7212                   LoopVectorizationCostModel::CM_Interleave);
7213     };
7214     if (!getDecisionAndClampRange(applyIG, Range))
7215       continue;
7216     InterleaveGroups.insert(IG);
7217     for (unsigned i = 0; i < IG->getFactor(); i++)
7218       if (Instruction *Member = IG->getMember(i))
7219         RecipeBuilder.recordRecipeOf(Member);
7220   }
7221
7222   // ---------------------------------------------------------------------------
7223   // Build initial VPlan: Scan the body of the loop in a topological order to
7224   // visit each basic block after having visited its predecessor basic blocks.
7225   // ---------------------------------------------------------------------------
7226
7227   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7228   auto Plan = std::make_unique<VPlan>();
7229   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7230   Plan->setEntry(VPBB);
7231
7232   // Represent values that will have defs inside VPlan.
7233   for (Value *V : NeedDef)
7234     Plan->addVPValue(V);
7235
7236   // Scan the body of the loop in a topological order to visit each basic block
7237   // after having visited its predecessor basic blocks.
7238   LoopBlocksDFS DFS(OrigLoop);
7239   DFS.perform(LI);
7240
7241   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7242     // Relevant instructions from basic block BB will be grouped into VPRecipe
7243     // ingredients and fill a new VPBasicBlock.
7244     unsigned VPBBsForBB = 0;
7245     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7246     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7247     VPBB = FirstVPBBForBB;
7248     Builder.setInsertPoint(VPBB);
7249
7250     // Introduce each ingredient into VPlan.
7251     // TODO: Model and preserve debug intrinsics in VPlan.
7252     for (Instruction &I : BB->instructionsWithoutDebug()) {
7253       Instruction *Instr = &I;
7254
7255       // First filter out irrelevant instructions, to ensure no recipes are
7256       // built for them.
7257       if (isa<BranchInst>(Instr) ||
7258           DeadInstructions.find(Instr) != DeadInstructions.end())
7259         continue;
7260
7261       if (auto Recipe =
7262               RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7263         RecipeBuilder.setRecipe(Instr, Recipe);
7264         VPBB->appendRecipe(Recipe);
7265         continue;
7266       }
7267
7268       // Otherwise, if all widening options failed, Instruction is to be
7269       // replicated. This may create a successor for VPBB.
7270       VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7271           Instr, Range, VPBB, PredInst2Recipe, Plan);
7272       if (NextVPBB != VPBB) {
7273         VPBB = NextVPBB;
7274         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7275                                     : "");
7276       }
7277     }
7278   }
7279
7280   // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
7281   // may also be empty, such as the last one VPBB, reflecting original
7282   // basic-blocks with no recipes.
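  // The dummy block's single successor becomes the real entry of the plan.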
7283 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 7284 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 7285 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 7286 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 7287 delete PreEntry; 7288 7289 // --------------------------------------------------------------------------- 7290 // Transform initial VPlan: Apply previously taken decisions, in order, to 7291 // bring the VPlan to its final state. 7292 // --------------------------------------------------------------------------- 7293 7294 // Apply Sink-After legal constraints. 7295 for (auto &Entry : SinkAfter) { 7296 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 7297 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 7298 Sink->moveAfter(Target); 7299 } 7300 7301 // Interleave memory: for each Interleave Group we marked earlier as relevant 7302 // for this VPlan, replace the Recipes widening its memory instructions with a 7303 // single VPInterleaveRecipe at its insertion point. 7304 for (auto IG : InterleaveGroups) { 7305 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 7306 RecipeBuilder.getRecipe(IG->getInsertPos())); 7307 (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask())) 7308 ->insertBefore(Recipe); 7309 7310 for (unsigned i = 0; i < IG->getFactor(); ++i) 7311 if (Instruction *Member = IG->getMember(i)) { 7312 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 7313 } 7314 } 7315 7316 // Finally, if tail is folded by masking, introduce selects between the phi 7317 // and the live-out instruction of each reduction, at the end of the latch. 7318 if (CM.foldTailByMasking()) { 7319 Builder.setInsertPoint(VPBB); 7320 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 7321 for (auto &Reduction : Legal->getReductionVars()) { 7322 VPValue *Phi = Plan->getVPValue(Reduction.first); 7323 VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr()); 7324 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 7325 } 7326 } 7327 7328 std::string PlanName; 7329 raw_string_ostream RSO(PlanName); 7330 unsigned VF = Range.Start; 7331 Plan->addVF(VF); 7332 RSO << "Initial VPlan for VF={" << VF; 7333 for (VF *= 2; VF < Range.End; VF *= 2) { 7334 Plan->addVF(VF); 7335 RSO << "," << VF; 7336 } 7337 RSO << "},UF>=1"; 7338 RSO.flush(); 7339 Plan->setName(PlanName); 7340 7341 return Plan; 7342 } 7343 7344 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 7345 // Outer loop handling: They may require CFG and instruction level 7346 // transformations before even evaluating whether vectorization is profitable. 7347 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7348 // the vectorization pipeline. 7349 assert(!OrigLoop->empty()); 7350 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7351 7352 // Create new empty VPlan 7353 auto Plan = std::make_unique<VPlan>(); 7354 7355 // Build hierarchical CFG 7356 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 7357 HCFGBuilder.buildHierarchicalCFG(); 7358 7359 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 7360 Plan->addVF(VF); 7361 7362 if (EnableVPlanPredication) { 7363 VPlanPredicator VPP(*Plan); 7364 VPP.predicate(); 7365 7366 // Avoid running transformation to recipes until masked code generation in 7367 // VPlan-native path is in place. 
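    // Returning here leaves the predicated plan in VPInstruction form.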
7368 return Plan; 7369 } 7370 7371 SmallPtrSet<Instruction *, 1> DeadInstructions; 7372 VPlanTransforms::VPInstructionsToVPRecipes( 7373 OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions); 7374 return Plan; 7375 } 7376 7377 Value* LoopVectorizationPlanner::VPCallbackILV:: 7378 getOrCreateVectorValues(Value *V, unsigned Part) { 7379 return ILV.getOrCreateVectorValue(V, Part); 7380 } 7381 7382 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue( 7383 Value *V, const VPIteration &Instance) { 7384 return ILV.getOrCreateScalarValue(V, Instance); 7385 } 7386 7387 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 7388 VPSlotTracker &SlotTracker) const { 7389 O << " +\n" 7390 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 7391 IG->getInsertPos()->printAsOperand(O, false); 7392 O << ", "; 7393 getAddr()->printAsOperand(O, SlotTracker); 7394 VPValue *Mask = getMask(); 7395 if (Mask) { 7396 O << ", "; 7397 Mask->printAsOperand(O, SlotTracker); 7398 } 7399 O << "\\l\""; 7400 for (unsigned i = 0; i < IG->getFactor(); ++i) 7401 if (Instruction *I = IG->getMember(i)) 7402 O << " +\n" 7403 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 7404 } 7405 7406 void VPWidenCallRecipe::execute(VPTransformState &State) { 7407 State.ILV->widenCallInstruction(Ingredient, User, State); 7408 } 7409 7410 void VPWidenSelectRecipe::execute(VPTransformState &State) { 7411 State.ILV->widenSelectInstruction(Ingredient, InvariantCond); 7412 } 7413 7414 void VPWidenRecipe::execute(VPTransformState &State) { 7415 State.ILV->widenInstruction(Ingredient, User, State); 7416 } 7417 7418 void VPWidenGEPRecipe::execute(VPTransformState &State) { 7419 State.ILV->widenGEP(GEP, State.UF, State.VF, IsPtrLoopInvariant, 7420 IsIndexLoopInvariant); 7421 } 7422 7423 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 7424 assert(!State.Instance && "Int or FP induction being replicated."); 7425 State.ILV->widenIntOrFpInduction(IV, Trunc); 7426 } 7427 7428 void VPWidenPHIRecipe::execute(VPTransformState &State) { 7429 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 7430 } 7431 7432 void VPBlendRecipe::execute(VPTransformState &State) { 7433 State.ILV->setDebugLocFromInst(State.Builder, Phi); 7434 // We know that all PHIs in non-header blocks are converted into 7435 // selects, so we don't have to worry about the insertion order and we 7436 // can just use the builder. 7437 // At this point we generate the predication tree. There may be 7438 // duplications since this is a simple recursive scan, but future 7439 // optimizations will clean it up. 7440 7441 unsigned NumIncoming = getNumIncomingValues(); 7442 7443 // Generate a sequence of selects of the form: 7444 // SELECT(Mask3, In3, 7445 // SELECT(Mask2, In2, 7446 // SELECT(Mask1, In1, 7447 // In0))) 7448 // Note that Mask0 is never used: lanes for which no path reaches this phi and 7449 // are essentially undef are taken from In0. 7450 InnerLoopVectorizer::VectorParts Entry(State.UF); 7451 for (unsigned In = 0; In < NumIncoming; ++In) { 7452 for (unsigned Part = 0; Part < State.UF; ++Part) { 7453 // We might have single edge PHIs (blocks) - use an identity 7454 // 'select' for the first PHI operand. 7455 Value *In0 = State.get(getIncomingValue(In), Part); 7456 if (In == 0) 7457 Entry[Part] = In0; // Initialize with the first incoming value. 7458 else { 7459 // Select between the current value and the previous incoming edge 7460 // based on the incoming mask. 
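        // Lanes whose mask bit is false keep the value accumulated from the
        // edges already processed.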
7461 Value *Cond = State.get(getMask(In), Part); 7462 Entry[Part] = 7463 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 7464 } 7465 } 7466 } 7467 for (unsigned Part = 0; Part < State.UF; ++Part) 7468 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]); 7469 } 7470 7471 void VPInterleaveRecipe::execute(VPTransformState &State) { 7472 assert(!State.Instance && "Interleave group being replicated."); 7473 State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask()); 7474 } 7475 7476 void VPReplicateRecipe::execute(VPTransformState &State) { 7477 if (State.Instance) { // Generate a single instance. 7478 State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated); 7479 // Insert scalar instance packing it into a vector. 7480 if (AlsoPack && State.VF > 1) { 7481 // If we're constructing lane 0, initialize to start from undef. 7482 if (State.Instance->Lane == 0) { 7483 Value *Undef = 7484 UndefValue::get(VectorType::get(Ingredient->getType(), State.VF)); 7485 State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef); 7486 } 7487 State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance); 7488 } 7489 return; 7490 } 7491 7492 // Generate scalar instances for all VF lanes of all UF parts, unless the 7493 // instruction is uniform inwhich case generate only the first lane for each 7494 // of the UF parts. 7495 unsigned EndLane = IsUniform ? 1 : State.VF; 7496 for (unsigned Part = 0; Part < State.UF; ++Part) 7497 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 7498 State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated); 7499 } 7500 7501 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 7502 assert(State.Instance && "Branch on Mask works only on single instance."); 7503 7504 unsigned Part = State.Instance->Part; 7505 unsigned Lane = State.Instance->Lane; 7506 7507 Value *ConditionBit = nullptr; 7508 if (!User) // Block in mask is all-one. 7509 ConditionBit = State.Builder.getTrue(); 7510 else { 7511 VPValue *BlockInMask = User->getOperand(0); 7512 ConditionBit = State.get(BlockInMask, Part); 7513 if (ConditionBit->getType()->isVectorTy()) 7514 ConditionBit = State.Builder.CreateExtractElement( 7515 ConditionBit, State.Builder.getInt32(Lane)); 7516 } 7517 7518 // Replace the temporary unreachable terminator with a new conditional branch, 7519 // whose two destinations will be set later when they are created. 7520 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 7521 assert(isa<UnreachableInst>(CurrentTerminator) && 7522 "Expected to replace unreachable terminator with conditional branch."); 7523 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 7524 CondBr->setSuccessor(0, nullptr); 7525 ReplaceInstWithInst(CurrentTerminator, CondBr); 7526 } 7527 7528 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 7529 assert(State.Instance && "Predicated instruction PHI works per instance."); 7530 Instruction *ScalarPredInst = cast<Instruction>( 7531 State.ValueMap.getScalarValue(PredInst, *State.Instance)); 7532 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 7533 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 7534 assert(PredicatingBB && "Predicated block has no single predecessor."); 7535 7536 // By current pack/unpack logic we need to generate only a single phi node: if 7537 // a vector value for the predicated instruction exists at this point it means 7538 // the instruction has vector users only, and a phi for the vector value is 7539 // needed. 
In this case the recipe of the predicated instruction is marked to 7540 // also do that packing, thereby "hoisting" the insert-element sequence. 7541 // Otherwise, a phi node for the scalar value is needed. 7542 unsigned Part = State.Instance->Part; 7543 if (State.ValueMap.hasVectorValue(PredInst, Part)) { 7544 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part); 7545 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 7546 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 7547 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 7548 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 7549 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache. 7550 } else { 7551 Type *PredInstType = PredInst->getType(); 7552 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 7553 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB); 7554 Phi->addIncoming(ScalarPredInst, PredicatedBB); 7555 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi); 7556 } 7557 } 7558 7559 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 7560 VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr; 7561 State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue, 7562 getMask()); 7563 } 7564 7565 // Determine how to lower the scalar epilogue, which depends on 1) optimising 7566 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 7567 // predication, and 4) a TTI hook that analyses whether the loop is suitable 7568 // for predication. 7569 static ScalarEpilogueLowering getScalarEpilogueLowering( 7570 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 7571 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 7572 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 7573 LoopVectorizationLegality &LVL) { 7574 bool OptSize = 7575 F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 7576 PGSOQueryType::IRPass); 7577 // 1) OptSize takes precedence over all other options, i.e. if this is set, 7578 // don't look at hints or options, and don't request a scalar epilogue. 7579 if (OptSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled) 7580 return CM_ScalarEpilogueNotAllowedOptSize; 7581 7582 bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() && 7583 !PreferPredicateOverEpilog; 7584 7585 // 2) Next, if disabling predication is requested on the command line, honour 7586 // this and request a scalar epilogue. 7587 if (PredicateOptDisabled) 7588 return CM_ScalarEpilogueAllowed; 7589 7590 // 3) and 4) look if enabling predication is requested on the command line, 7591 // with a loop hint, or if the TTI hook indicates this is profitable, request 7592 // predication . 7593 if (PreferPredicateOverEpilog || 7594 Hints.getPredicate() == LoopVectorizeHints::FK_Enabled || 7595 (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 7596 LVL.getLAI()) && 7597 Hints.getPredicate() != LoopVectorizeHints::FK_Disabled)) 7598 return CM_ScalarEpilogueNotNeededUsePredicate; 7599 7600 return CM_ScalarEpilogueAllowed; 7601 } 7602 7603 // Process the loop in the VPlan-native vectorization path. This path builds 7604 // VPlan upfront in the vectorization pipeline, which allows to apply 7605 // VPlan-to-VPlan transformations from the very beginning without modifying the 7606 // input LLVM IR. 
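// Returns true if the outer loop was vectorized; this path currently plans
// with an interleave count of 1.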
7607 static bool processLoopInVPlanNativePath( 7608 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 7609 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 7610 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 7611 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 7612 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 7613 7614 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 7615 Function *F = L->getHeader()->getParent(); 7616 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 7617 7618 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 7619 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 7620 7621 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 7622 &Hints, IAI); 7623 // Use the planner for outer loop vectorization. 7624 // TODO: CM is not used at this point inside the planner. Turn CM into an 7625 // optional argument if we don't need it in the future. 7626 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE); 7627 7628 // Get user vectorization factor. 7629 const unsigned UserVF = Hints.getWidth(); 7630 7631 // Plan how to best vectorize, return the best VF and its cost. 7632 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 7633 7634 // If we are stress testing VPlan builds, do not attempt to generate vector 7635 // code. Masked vector code generation support will follow soon. 7636 // Also, do not attempt to vectorize if no vector code will be produced. 7637 if (VPlanBuildStressTest || EnableVPlanPredication || 7638 VectorizationFactor::Disabled() == VF) 7639 return false; 7640 7641 LVP.setBestPlan(VF.Width, 1); 7642 7643 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 7644 &CM); 7645 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 7646 << L->getHeader()->getParent()->getName() << "\"\n"); 7647 LVP.executePlan(LB, DT); 7648 7649 // Mark the loop as already vectorized to avoid vectorizing again. 7650 Hints.setAlreadyVectorized(); 7651 7652 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7653 return true; 7654 } 7655 7656 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 7657 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 7658 !EnableLoopInterleaving), 7659 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 7660 !EnableLoopVectorization) {} 7661 7662 bool LoopVectorizePass::processLoop(Loop *L) { 7663 assert((EnableVPlanNativePath || L->empty()) && 7664 "VPlan-native path is not enabled. Only process inner loops."); 7665 7666 #ifndef NDEBUG 7667 const std::string DebugLocStr = getDebugLocString(L); 7668 #endif /* NDEBUG */ 7669 7670 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 7671 << L->getHeader()->getParent()->getName() << "\" from " 7672 << DebugLocStr << "\n"); 7673 7674 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 7675 7676 LLVM_DEBUG( 7677 dbgs() << "LV: Loop hints:" 7678 << " force=" 7679 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 7680 ? "disabled" 7681 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 7682 ? 
"enabled" 7683 : "?")) 7684 << " width=" << Hints.getWidth() 7685 << " unroll=" << Hints.getInterleave() << "\n"); 7686 7687 // Function containing loop 7688 Function *F = L->getHeader()->getParent(); 7689 7690 // Looking at the diagnostic output is the only way to determine if a loop 7691 // was vectorized (other than looking at the IR or machine code), so it 7692 // is important to generate an optimization remark for each loop. Most of 7693 // these messages are generated as OptimizationRemarkAnalysis. Remarks 7694 // generated as OptimizationRemark and OptimizationRemarkMissed are 7695 // less verbose reporting vectorized loops and unvectorized loops that may 7696 // benefit from vectorization, respectively. 7697 7698 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 7699 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 7700 return false; 7701 } 7702 7703 PredicatedScalarEvolution PSE(*SE, *L); 7704 7705 // Check if it is legal to vectorize the loop. 7706 LoopVectorizationRequirements Requirements(*ORE); 7707 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 7708 &Requirements, &Hints, DB, AC); 7709 if (!LVL.canVectorize(EnableVPlanNativePath)) { 7710 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 7711 Hints.emitRemarkWithHints(); 7712 return false; 7713 } 7714 7715 // Check the function attributes and profiles to find out if this function 7716 // should be optimized for size. 7717 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 7718 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 7719 7720 // Entrance to the VPlan-native vectorization path. Outer loops are processed 7721 // here. They may require CFG and instruction level transformations before 7722 // even evaluating whether vectorization is profitable. Since we cannot modify 7723 // the incoming IR, we need to build VPlan upfront in the vectorization 7724 // pipeline. 7725 if (!L->empty()) 7726 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 7727 ORE, BFI, PSI, Hints); 7728 7729 assert(L->empty() && "Inner loop expected."); 7730 7731 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 7732 // count by optimizing for size, to minimize overheads. 7733 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 7734 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 7735 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 7736 << "This loop is worth vectorizing only if no scalar " 7737 << "iteration overheads are incurred."); 7738 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 7739 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 7740 else { 7741 LLVM_DEBUG(dbgs() << "\n"); 7742 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 7743 } 7744 } 7745 7746 // Check the function attributes to see if implicit floats are allowed. 7747 // FIXME: This check doesn't seem possibly correct -- what if the loop is 7748 // an integer loop and the vector instructions selected are purely integer 7749 // vector instructions? 7750 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 7751 reportVectorizationFailure( 7752 "Can't vectorize when the NoImplicitFloat attribute is used", 7753 "loop not vectorized due to NoImplicitFloat attribute", 7754 "NoImplicitFloat", ORE, L); 7755 Hints.emitRemarkWithHints(); 7756 return false; 7757 } 7758 7759 // Check if the target supports potentially unsafe FP vectorization. 
7760 // FIXME: Add a check for the type of safety issue (denormal, signaling) 7761 // for the target we're vectorizing for, to make sure none of the 7762 // additional fp-math flags can help. 7763 if (Hints.isPotentiallyUnsafe() && 7764 TTI->isFPVectorizationPotentiallyUnsafe()) { 7765 reportVectorizationFailure( 7766 "Potentially unsafe FP op prevents vectorization", 7767 "loop not vectorized due to unsafe FP support.", 7768 "UnsafeFP", ORE, L); 7769 Hints.emitRemarkWithHints(); 7770 return false; 7771 } 7772 7773 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 7774 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 7775 7776 // If an override option has been passed in for interleaved accesses, use it. 7777 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 7778 UseInterleaved = EnableInterleavedMemAccesses; 7779 7780 // Analyze interleaved memory accesses. 7781 if (UseInterleaved) { 7782 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 7783 } 7784 7785 // Use the cost model. 7786 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 7787 F, &Hints, IAI); 7788 CM.collectValuesToIgnore(); 7789 7790 // Use the planner for vectorization. 7791 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE); 7792 7793 // Get user vectorization factor. 7794 unsigned UserVF = Hints.getWidth(); 7795 7796 // Plan how to best vectorize, return the best VF and its cost. 7797 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF); 7798 7799 VectorizationFactor VF = VectorizationFactor::Disabled(); 7800 unsigned IC = 1; 7801 unsigned UserIC = Hints.getInterleave(); 7802 7803 if (MaybeVF) { 7804 VF = *MaybeVF; 7805 // Select the interleave count. 7806 IC = CM.selectInterleaveCount(VF.Width, VF.Cost); 7807 } 7808 7809 // Identify the diagnostic messages that should be produced. 7810 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 7811 bool VectorizeLoop = true, InterleaveLoop = true; 7812 if (Requirements.doesNotMeet(F, L, Hints)) { 7813 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization " 7814 "requirements.\n"); 7815 Hints.emitRemarkWithHints(); 7816 return false; 7817 } 7818 7819 if (VF.Width == 1) { 7820 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 7821 VecDiagMsg = std::make_pair( 7822 "VectorizationNotBeneficial", 7823 "the cost-model indicates that vectorization is not beneficial"); 7824 VectorizeLoop = false; 7825 } 7826 7827 if (!MaybeVF && UserIC > 1) { 7828 // Tell the user interleaving was avoided up-front, despite being explicitly 7829 // requested. 7830 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 7831 "interleaving should be avoided up front\n"); 7832 IntDiagMsg = std::make_pair( 7833 "InterleavingAvoided", 7834 "Ignoring UserIC, because interleaving was avoided up front"); 7835 InterleaveLoop = false; 7836 } else if (IC == 1 && UserIC <= 1) { 7837 // Tell the user interleaving is not beneficial. 7838 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 7839 IntDiagMsg = std::make_pair( 7840 "InterleavingNotBeneficial", 7841 "the cost-model indicates that interleaving is not beneficial"); 7842 InterleaveLoop = false; 7843 if (UserIC == 1) { 7844 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 7845 IntDiagMsg.second += 7846 " and is explicitly disabled or interleave count is set to 1"; 7847 } 7848 } else if (IC > 1 && UserIC == 1) { 7849 // Tell the user interleaving is beneficial, but it explicitly disabled. 
7850 LLVM_DEBUG( 7851 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 7852 IntDiagMsg = std::make_pair( 7853 "InterleavingBeneficialButDisabled", 7854 "the cost-model indicates that interleaving is beneficial " 7855 "but is explicitly disabled or interleave count is set to 1"); 7856 InterleaveLoop = false; 7857 } 7858 7859 // Override IC if user provided an interleave count. 7860 IC = UserIC > 0 ? UserIC : IC; 7861 7862 // Emit diagnostic messages, if any. 7863 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 7864 if (!VectorizeLoop && !InterleaveLoop) { 7865 // Do not vectorize or interleaving the loop. 7866 ORE->emit([&]() { 7867 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 7868 L->getStartLoc(), L->getHeader()) 7869 << VecDiagMsg.second; 7870 }); 7871 ORE->emit([&]() { 7872 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 7873 L->getStartLoc(), L->getHeader()) 7874 << IntDiagMsg.second; 7875 }); 7876 return false; 7877 } else if (!VectorizeLoop && InterleaveLoop) { 7878 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 7879 ORE->emit([&]() { 7880 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 7881 L->getStartLoc(), L->getHeader()) 7882 << VecDiagMsg.second; 7883 }); 7884 } else if (VectorizeLoop && !InterleaveLoop) { 7885 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 7886 << ") in " << DebugLocStr << '\n'); 7887 ORE->emit([&]() { 7888 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 7889 L->getStartLoc(), L->getHeader()) 7890 << IntDiagMsg.second; 7891 }); 7892 } else if (VectorizeLoop && InterleaveLoop) { 7893 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 7894 << ") in " << DebugLocStr << '\n'); 7895 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 7896 } 7897 7898 LVP.setBestPlan(VF.Width, IC); 7899 7900 using namespace ore; 7901 bool DisableRuntimeUnroll = false; 7902 MDNode *OrigLoopID = L->getLoopID(); 7903 7904 if (!VectorizeLoop) { 7905 assert(IC > 1 && "interleave count should not be 1 or 0"); 7906 // If we decided that it is not legal to vectorize the loop, then 7907 // interleave it. 7908 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 7909 &CM); 7910 LVP.executePlan(Unroller, DT); 7911 7912 ORE->emit([&]() { 7913 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 7914 L->getHeader()) 7915 << "interleaved loop (interleaved count: " 7916 << NV("InterleaveCount", IC) << ")"; 7917 }); 7918 } else { 7919 // If we decided that it is *legal* to vectorize the loop, then do it. 7920 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 7921 &LVL, &CM); 7922 LVP.executePlan(LB, DT); 7923 ++LoopsVectorized; 7924 7925 // Add metadata to disable runtime unrolling a scalar loop when there are 7926 // no runtime checks about strides and memory. A scalar loop that is 7927 // rarely used is not worth unrolling. 7928 if (!LB.areSafetyChecksAdded()) 7929 DisableRuntimeUnroll = true; 7930 7931 // Report the vectorization decision. 
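    // The emitted remark records both the vectorization width and the
    // interleave count, and can be surfaced with -Rpass=loop-vectorize.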
7932 ORE->emit([&]() { 7933 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 7934 L->getHeader()) 7935 << "vectorized loop (vectorization width: " 7936 << NV("VectorizationFactor", VF.Width) 7937 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 7938 }); 7939 } 7940 7941 Optional<MDNode *> RemainderLoopID = 7942 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7943 LLVMLoopVectorizeFollowupEpilogue}); 7944 if (RemainderLoopID.hasValue()) { 7945 L->setLoopID(RemainderLoopID.getValue()); 7946 } else { 7947 if (DisableRuntimeUnroll) 7948 AddRuntimeUnrollDisableMetaData(L); 7949 7950 // Mark the loop as already vectorized to avoid vectorizing again. 7951 Hints.setAlreadyVectorized(); 7952 } 7953 7954 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7955 return true; 7956 } 7957 7958 LoopVectorizeResult LoopVectorizePass::runImpl( 7959 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 7960 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 7961 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_, 7962 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 7963 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 7964 SE = &SE_; 7965 LI = &LI_; 7966 TTI = &TTI_; 7967 DT = &DT_; 7968 BFI = &BFI_; 7969 TLI = TLI_; 7970 AA = &AA_; 7971 AC = &AC_; 7972 GetLAA = &GetLAA_; 7973 DB = &DB_; 7974 ORE = &ORE_; 7975 PSI = PSI_; 7976 7977 // Don't attempt if 7978 // 1. the target claims to have no vector registers, and 7979 // 2. interleaving won't help ILP. 7980 // 7981 // The second condition is necessary because, even if the target has no 7982 // vector registers, loop vectorization may still enable scalar 7983 // interleaving. 7984 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 7985 TTI->getMaxInterleaveFactor(1) < 2) 7986 return LoopVectorizeResult(false, false); 7987 7988 bool Changed = false, CFGChanged = false; 7989 7990 // The vectorizer requires loops to be in simplified form. 7991 // Since simplification may add new inner loops, it has to run before the 7992 // legality and profitability checks. This means running the loop vectorizer 7993 // will simplify all loops, regardless of whether anything end up being 7994 // vectorized. 7995 for (auto &L : *LI) 7996 Changed |= CFGChanged |= 7997 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 7998 7999 // Build up a worklist of inner-loops to vectorize. This is necessary as 8000 // the act of vectorizing or partially unrolling a loop creates new loops 8001 // and can invalidate iterators across the loops. 8002 SmallVector<Loop *, 8> Worklist; 8003 8004 for (Loop *L : *LI) 8005 collectSupportedLoops(*L, LI, ORE, Worklist); 8006 8007 LoopsAnalyzed += Worklist.size(); 8008 8009 // Now walk the identified inner loops. 8010 while (!Worklist.empty()) { 8011 Loop *L = Worklist.pop_back_val(); 8012 8013 // For the inner loops we actually process, form LCSSA to simplify the 8014 // transform. 8015 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 8016 8017 Changed |= CFGChanged |= processLoop(L); 8018 } 8019 8020 // Process each loop nest in the function. 
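  // Report whether any change and, separately, any CFG change was made, so
  // the caller can decide which analyses to invalidate.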
8021 return LoopVectorizeResult(Changed, CFGChanged); 8022 } 8023 8024 PreservedAnalyses LoopVectorizePass::run(Function &F, 8025 FunctionAnalysisManager &AM) { 8026 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 8027 auto &LI = AM.getResult<LoopAnalysis>(F); 8028 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 8029 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 8030 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 8031 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 8032 auto &AA = AM.getResult<AAManager>(F); 8033 auto &AC = AM.getResult<AssumptionAnalysis>(F); 8034 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 8035 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 8036 MemorySSA *MSSA = EnableMSSALoopDependency 8037 ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() 8038 : nullptr; 8039 8040 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 8041 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 8042 [&](Loop &L) -> const LoopAccessInfo & { 8043 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA}; 8044 return LAM.getResult<LoopAccessAnalysis>(L, AR); 8045 }; 8046 const ModuleAnalysisManager &MAM = 8047 AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager(); 8048 ProfileSummaryInfo *PSI = 8049 MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 8050 LoopVectorizeResult Result = 8051 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 8052 if (!Result.MadeAnyChange) 8053 return PreservedAnalyses::all(); 8054 PreservedAnalyses PA; 8055 8056 // We currently do not preserve loopinfo/dominator analyses with outer loop 8057 // vectorization. Until this is addressed, mark these analyses as preserved 8058 // only for non-VPlan-native path. 8059 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 8060 if (!EnableVPlanNativePath) { 8061 PA.preserve<LoopAnalysis>(); 8062 PA.preserve<DominatorTreeAnalysis>(); 8063 } 8064 PA.preserve<BasicAA>(); 8065 PA.preserve<GlobalsAA>(); 8066 if (!Result.MadeCFGChange) 8067 PA.preserveSet<CFGAnalyses>(); 8068 return PA; 8069 } 8070