//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
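//
// For example (an illustrative sketch, not actual output of this pass), a
// scalar loop such as
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + 42;
// is conceptually rewritten so that each wide iteration processes VF
// elements at once:
//   for (int i = 0; i <= n - 4; i += 4)        // VF = 4
//     a[i..i+3] = b[i..i+3] + <42,42,42,42>;   // one wide iteration
// with the remaining iterations handled by a scalar epilogue loop or by
// predicating (masking) the vector loop body.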
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
// Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
// Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
// Data for SIMD.
//
// Other ideas/concepts are from:
// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
// S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
// Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Indicates that an epilogue is undesired and predication is preferred.
// This means that the vectorizer will try to fold the loop-tail (epilogue)
// into the loop and predicate the loop body accordingly.
static cl::opt<bool> PreferPredicateOverEpilog(
    "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
    cl::desc("Indicate that an epilogue is undesired, predication should be "
             "used instead."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
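/// For example (illustrative only), a group with interleave factor 2 where
/// only the even elements are accessed has a gap:
///   for (i = 0; i < N; i += 2)
///     sum += A[i];   // member 0 of the group; A[i+1] is a gap
/// Loading A with wide vectors would also touch the A[i+1] locations, so the
/// corresponding lanes must be masked away (or a scalar epilogue used).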
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in "
             "a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));

cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
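// For example (an assumption based on a typical x86-64 data layout,
// illustrative only): x86_fp80 has a store size of 10 bytes but an alloc size
// of 16 bytes, so an array of x86_fp80 is not bitcast-compatible with a
// <VF x x86_fp80> vector and the type is considered irregular.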
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }
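// For example (illustrative only), under the 50% assumption above a
// predicated block executes once every getReciprocalPredBlockProb() == 2
// iterations of the header, so the cost model divides the cost of the
// instructions in such a block by 2.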
/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FMF);
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, bool InvariantCond);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, unsigned UF, unsigned VF,
                bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate
  /// a vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
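  ///
  /// A sketch (illustrative only): with VF = 4, a scalarized i32 definition
  /// %d with per-lane values %d.0 ... %d.3 is packed for a vector use as
  ///   %v.0 = insertelement <4 x i32> undef, i32 %d.0, i32 0
  ///   %v.1 = insertelement <4 x i32> %v.0,  i32 %d.1, i32 1
  ///   %v.2 = insertelement <4 x i32> %v.1,  i32 %d.2, i32 2
  ///   %v.3 = insertelement <4 x i32> %v.2,  i32 %d.3, i32 3
  /// and %v.3 then serves as the vector operand of the use.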
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                VPTransformState &State, VPValue *Addr,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Addr, VPValue *StoredValue,
                                  VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);
  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
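  /// For example (illustrative only), for a scalar induction \p ScalarIV with
  /// step S, VF = 4 and UF = 1, this materializes the per-lane scalar values
  ///   ScalarIV + 0*S, ScalarIV + 1*S, ScalarIV + 2*S, ScalarIV + 3*S
  /// for those uses of the induction variable that remain scalar.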
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and
  /// createVectorIntOrFpInductionPHI()). In the latter case \p EntryVal is a
  /// TruncInst and we must not record anything for that IV, but it's
  /// error-prone to expect callers of this routine to care about that, hence
  /// this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm
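// For example (illustrative only), InnerLoopUnroller is an InnerLoopVectorizer
// with VF = 1: with UF = 3 it emits three scalar copies of each original
// instruction per wide iteration and steps the induction variable by 3, and
// the overrides above produce scalar values rather than vectors.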
/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B,
                                              const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is a debug location attached to the instruction, use it;
    // otherwise keep using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag,
                             TheLoop, I)
            << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate
};
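// For example (illustrative only), with VF = 4 and a trip count of 10, an
// allowed scalar epilogue runs the 2 remainder iterations after 2 wide
// iterations, whereas folding the tail by masking runs 3 wide iterations,
// the last one predicated, and no scalar tail at all.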
/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<unsigned> computeMaxVF();

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not zero,
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
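  // For example (illustrative only), if getSmallestAndWidestTypes() returns
  // {8, 32} and the target's vector registers are 128 bits wide, the widest
  // type admits a maximum VF of 4, while -vectorizer-maximize-bandwidth would
  // size VF by the smallest type and allow a VF of 16.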
  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost. This function takes
  /// cost-based decisions for Load/Store instructions and collects them in a
  /// map. The resulting decision map is used for building the lists of
  /// loop-uniform and loop-scalar instructions. The calculated cost is saved
  /// with the widening decision in order to avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return true if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
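  /// For example (illustrative pseudo-code only), in
  ///   for (i64 i = 0; i < n; ++i)
  ///     a[i] = (i32)i;
  /// the truncate of i to i32 can be removed by creating a second induction
  /// variable that is already of the narrow i32 type.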
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    MaybeAlign Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if \p I will be scalarized
  /// with predication for that VF.
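  /// For example (illustrative only), in
  ///   for (i = 0; i < n; ++i)
  ///     if (b[i] != 0) c[i] = a[i] / b[i];
  /// the division cannot be executed speculatively for masked-off lanes, so
  /// it is scalarized and each scalar instance is guarded by its lane's
  /// predicate.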
  bool isScalarWithPredication(Instruction *I, unsigned VF = 1);

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps, and there is nothing preventing us from
  /// creating a scalar epilogue.
  bool requiresScalarEpilogue() const {
    return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize or a
  /// loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold the tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized, i.e. either a vector version isn't available or it is too
  /// expensive.
  unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);
1353 /// The cost computation for a scalarized memory instruction. 1354 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF); 1355 1356 /// The cost computation for an interleaving group of memory instructions. 1357 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF); 1358 1359 /// The cost computation for a Gather/Scatter instruction. 1360 unsigned getGatherScatterCost(Instruction *I, unsigned VF); 1361 1362 /// The cost computation for widening instruction \p I with consecutive 1363 /// memory access. 1364 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF); 1365 1366 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1367 /// Load: scalar load + broadcast. 1368 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1369 /// element) 1370 unsigned getUniformMemOpCost(Instruction *I, unsigned VF); 1371 1372 /// Estimate the overhead of scalarizing an instruction. This is a 1373 /// convenience wrapper for the type-based getScalarizationOverhead API. 1374 unsigned getScalarizationOverhead(Instruction *I, unsigned VF); 1375 1376 /// Returns whether the instruction is a load or store and will be emitted 1377 /// as a vector operation. 1378 bool isConsecutiveLoadOrStore(Instruction *I); 1379 1380 /// Returns true if an artificially high cost for emulated masked memrefs 1381 /// should be used. 1382 bool useEmulatedMaskMemRefHack(Instruction *I); 1383 1384 /// Map of scalar integer values to the smallest bitwidth they can be legally 1385 /// represented as. The vector equivalents of these values should be truncated 1386 /// to this type. 1387 MapVector<Instruction *, uint64_t> MinBWs; 1388 1389 /// A type representing the costs for instructions if they were to be 1390 /// scalarized rather than vectorized. The entries are Instruction-Cost 1391 /// pairs. 1392 using ScalarCostsTy = DenseMap<Instruction *, unsigned>; 1393 1394 /// A set containing all BasicBlocks that are known to be present after 1395 /// vectorization as predicated blocks. 1396 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1397 1398 /// Records whether it is allowed to have the original scalar loop execute at 1399 /// least once. This may be needed as a fallback loop in case runtime 1400 /// aliasing/dependence checks fail, or to handle the tail/remainder 1401 /// iterations when the trip count is unknown or isn't a multiple of the VF, 1402 /// or as a peel-loop to handle gaps in interleave-groups. 1403 /// Under optsize and when the trip count is very small we don't allow any 1404 /// iterations to execute in the scalar loop. 1405 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1406 1407 /// All blocks of the loop are to be masked to fold the tail of the scalar 1408 /// iterations. bool FoldTailByMasking = false; 1409 1410 /// A map holding scalar costs for different vectorization factors. The 1411 /// presence of a cost for an instruction in the mapping indicates that the 1412 /// instruction will be scalarized when vectorizing with the associated 1413 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1414 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize; 1415 1416 /// Holds the instructions known to be uniform after vectorization. 1417 /// The data is collected per VF. 1418 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms; 1419 1420 /// Holds the instructions known to be scalar after vectorization. 1421 /// The data is collected per VF.
1422 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1423 1424 /// Holds the instructions (address computations) that are forced to be 1425 /// scalarized. 1426 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1427 1428 /// Returns the expected difference in cost from scalarizing the expression 1429 /// feeding a predicated instruction \p PredInst. The instructions to 1430 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1431 /// non-negative return value implies the expression will be scalarized. 1432 /// Currently, only single-use chains are considered for scalarization. 1433 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1434 unsigned VF); 1435 1436 /// Collect the instructions that are uniform after vectorization. An 1437 /// instruction is uniform if we represent it with a single scalar value in 1438 /// the vectorized loop corresponding to each vector iteration. Examples of 1439 /// uniform instructions include pointer operands of consecutive or 1440 /// interleaved memory accesses. Note that although uniformity implies an 1441 /// instruction will be scalar, the reverse is not true. In general, a 1442 /// scalarized instruction will be represented by VF scalar values in the 1443 /// vectorized loop, each corresponding to an iteration of the original 1444 /// scalar loop. 1445 void collectLoopUniforms(unsigned VF); 1446 1447 /// Collect the instructions that are scalar after vectorization. An 1448 /// instruction is scalar if it is known to be uniform or will be scalarized 1449 /// during vectorization. Non-uniform scalarized instructions will be 1450 /// represented by VF values in the vectorized loop, each corresponding to an 1451 /// iteration of the original scalar loop. 1452 void collectLoopScalars(unsigned VF); 1453 1454 /// Keeps cost model vectorization decision and cost for instructions. 1455 /// Right now it is used for memory instructions only. 1456 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1457 std::pair<InstWidening, unsigned>>; 1458 1459 DecisionList WideningDecisions; 1460 1461 /// Returns true if \p V is expected to be vectorized and it needs to be 1462 /// extracted. 1463 bool needsExtract(Value *V, unsigned VF) const { 1464 Instruction *I = dyn_cast<Instruction>(V); 1465 if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I)) 1466 return false; 1467 1468 // Assume we can vectorize V (and hence we need extraction) if the 1469 // scalars are not computed yet. This can happen, because it is called 1470 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1471 // the scalars are collected. That should be a safe assumption in most 1472 // cases, because we check if the operands have vectorizable types 1473 // beforehand in LoopVectorizationLegality. 1474 return Scalars.find(VF) == Scalars.end() || 1475 !isScalarAfterVectorization(I, VF); 1476 }; 1477 1478 /// Returns a range containing only operands needing to be extracted. 1479 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1480 unsigned VF) { 1481 return SmallVector<Value *, 4>(make_filter_range( 1482 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1483 } 1484 1485 public: 1486 /// The loop that we evaluate. 1487 Loop *TheLoop; 1488 1489 /// Predicated scalar evolution analysis. 1490 PredicatedScalarEvolution &PSE; 1491 1492 /// Loop Info analysis. 1493 LoopInfo *LI; 1494 1495 /// Vectorization legality. 
1496 LoopVectorizationLegality *Legal; 1497 1498 /// Vector target information. 1499 const TargetTransformInfo &TTI; 1500 1501 /// Target Library Info. 1502 const TargetLibraryInfo *TLI; 1503 1504 /// Demanded bits analysis. 1505 DemandedBits *DB; 1506 1507 /// Assumption cache. 1508 AssumptionCache *AC; 1509 1510 /// Interface to emit optimization remarks. 1511 OptimizationRemarkEmitter *ORE; 1512 1513 const Function *TheFunction; 1514 1515 /// Loop Vectorize Hint. 1516 const LoopVectorizeHints *Hints; 1517 1518 /// The interleave access information contains groups of interleaved accesses 1519 /// with the same stride and close to each other. 1520 InterleavedAccessInfo &InterleaveInfo; 1521 1522 /// Values to ignore in the cost model. 1523 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1524 1525 /// Values to ignore in the cost model when VF > 1. 1526 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1527 }; 1528 1529 } // end namespace llvm 1530 1531 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1532 // vectorization. The loop needs to be annotated with #pragma omp simd 1533 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1534 // vector length information is not provided, vectorization is not considered 1535 // explicit. Interleave hints are not allowed either. These limitations will be 1536 // relaxed in the future. 1537 // Please, note that we are currently forced to abuse the pragma 'clang 1538 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1539 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1540 // provides *explicit vectorization hints* (LV can bypass legal checks and 1541 // assume that vectorization is legal). However, both hints are implemented 1542 // using the same metadata (llvm.loop.vectorize, processed by 1543 // LoopVectorizeHints). This will be fixed in the future when the native IR 1544 // representation for pragma 'omp simd' is introduced. 1545 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1546 OptimizationRemarkEmitter *ORE) { 1547 assert(!OuterLp->empty() && "This is not an outer loop"); 1548 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1549 1550 // Only outer loops with an explicit vectorization hint are supported. 1551 // Unannotated outer loops are ignored. 1552 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1553 return false; 1554 1555 Function *Fn = OuterLp->getHeader()->getParent(); 1556 if (!Hints.allowVectorization(Fn, OuterLp, 1557 true /*VectorizeOnlyWhenForced*/)) { 1558 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1559 return false; 1560 } 1561 1562 if (Hints.getInterleave() > 1) { 1563 // TODO: Interleave support is future work. 1564 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1565 "outer loops.\n"); 1566 Hints.emitRemarkWithHints(); 1567 return false; 1568 } 1569 1570 return true; 1571 } 1572 1573 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1574 OptimizationRemarkEmitter *ORE, 1575 SmallVectorImpl<Loop *> &V) { 1576 // Collect inner loops and outer loops without irreducible control flow. For 1577 // now, only collect outer loops that have explicit vectorization hints. If we 1578 // are stress testing the VPlan H-CFG construction, we collect the outermost 1579 // loop of every loop nest. 
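// For example, an outer loop that isExplicitVecOuterLoop above would
// accept looks like this hypothetical source (the explicit vector length
// is required):
//
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (i = 0; i < n; ++i)      // outer loop, vectorized as a whole
//     for (j = 0; j < m; ++j)
//       a[i][j] += b[i][j];
//
// A '#pragma omp simd simdlen(4)' annotation is treated the same way;
// without a width, or with an interleave hint, the outer loop is rejected
// and only its inner loops are considered.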
1580 if (L.empty() || VPlanBuildStressTest || 1581 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1582 LoopBlocksRPO RPOT(&L); 1583 RPOT.perform(LI); 1584 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1585 V.push_back(&L); 1586 // TODO: Collect inner loops inside marked outer loops in case 1587 // vectorization fails for the outer loop. Do not invoke 1588 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1589 // already known to be reducible. We can use an inherited attribute for 1590 // that. 1591 return; 1592 } 1593 } 1594 for (Loop *InnerL : L) 1595 collectSupportedLoops(*InnerL, LI, ORE, V); 1596 } 1597 1598 namespace { 1599 1600 /// The LoopVectorize Pass. 1601 struct LoopVectorize : public FunctionPass { 1602 /// Pass identification, replacement for typeid 1603 static char ID; 1604 1605 LoopVectorizePass Impl; 1606 1607 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1608 bool VectorizeOnlyWhenForced = false) 1609 : FunctionPass(ID), 1610 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1611 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1612 } 1613 1614 bool runOnFunction(Function &F) override { 1615 if (skipFunction(F)) 1616 return false; 1617 1618 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1619 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1620 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1621 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1622 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1623 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1624 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 1625 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1626 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1627 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1628 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1629 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1630 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1631 1632 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1633 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1634 1635 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1636 GetLAA, *ORE, PSI).MadeAnyChange; 1637 } 1638 1639 void getAnalysisUsage(AnalysisUsage &AU) const override { 1640 AU.addRequired<AssumptionCacheTracker>(); 1641 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1642 AU.addRequired<DominatorTreeWrapperPass>(); 1643 AU.addRequired<LoopInfoWrapperPass>(); 1644 AU.addRequired<ScalarEvolutionWrapperPass>(); 1645 AU.addRequired<TargetTransformInfoWrapperPass>(); 1646 AU.addRequired<AAResultsWrapperPass>(); 1647 AU.addRequired<LoopAccessLegacyAnalysis>(); 1648 AU.addRequired<DemandedBitsWrapperPass>(); 1649 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1650 AU.addRequired<InjectTLIMappingsLegacy>(); 1651 1652 // We currently do not preserve loopinfo/dominator analyses with outer loop 1653 // vectorization. Until this is addressed, mark these analyses as preserved 1654 // only for non-VPlan-native path. 1655 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
1656 if (!EnableVPlanNativePath) { 1657 AU.addPreserved<LoopInfoWrapperPass>(); 1658 AU.addPreserved<DominatorTreeWrapperPass>(); 1659 } 1660 1661 AU.addPreserved<BasicAAWrapperPass>(); 1662 AU.addPreserved<GlobalsAAWrapperPass>(); 1663 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1664 } 1665 }; 1666 1667 } // end anonymous namespace 1668 1669 //===----------------------------------------------------------------------===// 1670 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1671 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1672 //===----------------------------------------------------------------------===// 1673 1674 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1675 // We need to place the broadcast of invariant variables outside the loop, 1676 // but only if it's proven safe to do so. Else, broadcast will be inside 1677 // vector loop body. 1678 Instruction *Instr = dyn_cast<Instruction>(V); 1679 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1680 (!Instr || 1681 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1682 // Place the code for broadcasting invariant variables in the new preheader. 1683 IRBuilder<>::InsertPointGuard Guard(Builder); 1684 if (SafeToHoist) 1685 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1686 1687 // Broadcast the scalar into all locations in the vector. 1688 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1689 1690 return Shuf; 1691 } 1692 1693 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1694 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1695 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1696 "Expected either an induction phi-node or a truncate of it!"); 1697 Value *Start = II.getStartValue(); 1698 1699 // Construct the initial value of the vector IV in the vector loop preheader 1700 auto CurrIP = Builder.saveIP(); 1701 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1702 if (isa<TruncInst>(EntryVal)) { 1703 assert(Start->getType()->isIntegerTy() && 1704 "Truncation requires an integer type"); 1705 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1706 Step = Builder.CreateTrunc(Step, TruncType); 1707 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1708 } 1709 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1710 Value *SteppedStart = 1711 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1712 1713 // We create vector phi nodes for both integer and floating-point induction 1714 // variables. Here, we determine the kind of arithmetic we will perform. 1715 Instruction::BinaryOps AddOp; 1716 Instruction::BinaryOps MulOp; 1717 if (Step->getType()->isIntegerTy()) { 1718 AddOp = Instruction::Add; 1719 MulOp = Instruction::Mul; 1720 } else { 1721 AddOp = II.getInductionOpcode(); 1722 MulOp = Instruction::FMul; 1723 } 1724 1725 // Multiply the vectorization factor by the step using integer or 1726 // floating-point arithmetic as appropriate. 1727 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1728 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1729 1730 // Create a vector splat to use in the induction update. 1731 // 1732 // FIXME: If the step is non-constant, we create the vector splat with 1733 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1734 // handle a constant vector splat. 1735 Value *SplatVF = 1736 isa<Constant>(Mul) 1737 ? 
ConstantVector::getSplat({VF, false}, cast<Constant>(Mul)) 1738 : Builder.CreateVectorSplat(VF, Mul); 1739 Builder.restoreIP(CurrIP); 1740 1741 // We may need to add the step a number of times, depending on the unroll 1742 // factor. The last of those goes into the PHI. 1743 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1744 &*LoopVectorBody->getFirstInsertionPt()); 1745 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1746 Instruction *LastInduction = VecInd; 1747 for (unsigned Part = 0; Part < UF; ++Part) { 1748 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1749 1750 if (isa<TruncInst>(EntryVal)) 1751 addMetadata(LastInduction, EntryVal); 1752 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1753 1754 LastInduction = cast<Instruction>(addFastMathFlag( 1755 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1756 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1757 } 1758 1759 // Move the last step to the end of the latch block. This ensures consistent 1760 // placement of all induction updates. 1761 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1762 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1763 auto *ICmp = cast<Instruction>(Br->getCondition()); 1764 LastInduction->moveBefore(ICmp); 1765 LastInduction->setName("vec.ind.next"); 1766 1767 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1768 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1769 } 1770 1771 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1772 return Cost->isScalarAfterVectorization(I, VF) || 1773 Cost->isProfitableToScalarize(I, VF); 1774 } 1775 1776 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1777 if (shouldScalarizeInstruction(IV)) 1778 return true; 1779 auto isScalarInst = [&](User *U) -> bool { 1780 auto *I = cast<Instruction>(U); 1781 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1782 }; 1783 return llvm::any_of(IV->users(), isScalarInst); 1784 } 1785 1786 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1787 const InductionDescriptor &ID, const Instruction *EntryVal, 1788 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1789 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1790 "Expected either an induction phi-node or a truncate of it!"); 1791 1792 // If EntryVal is a truncate, it is not the phi from the original loop but a 1793 // newly-created IV, based on the proof that the cast Phi is equal to the 1794 // uncast Phi in the vectorized loop (possibly under a runtime guard). It 1795 // reuses the same InductionDescriptor as the original IV, but we don't 1796 // have to do any recording in this case - that is done when the original IV 1797 // is processed. 1798 if (isa<TruncInst>(EntryVal)) 1799 return; 1800 1801 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1802 if (Casts.empty()) 1803 return; 1804 // Only the first Cast instruction in the Casts vector is of interest. 1805 // The rest of the Casts (if any) have no uses outside the 1806 // induction update chain itself.
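// For example (hypothetical IR): when the IV's update chain goes through a
// same-type cast that PSE has proven to be the identity under a runtime
// guard,
//
//   %iv = phi i64 [ 0, %ph ], [ %inc, %loop ]
//   %masked = and i64 %iv, 255      ; == %iv under the SCEV predicate
//   %inc = add i64 %masked, 1
//
// the widened induction value is recorded for %masked as well, so users of
// %masked inside the loop are served directly from the vectorized IV.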
1807 Instruction *CastInst = *Casts.begin(); 1808 if (Lane < UINT_MAX) 1809 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1810 else 1811 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1812 } 1813 1814 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1815 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1816 "Primary induction variable must have an integer type"); 1817 1818 auto II = Legal->getInductionVars().find(IV); 1819 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 1820 1821 auto ID = II->second; 1822 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1823 1824 // The value from the original loop to which we are mapping the new induction 1825 // variable. 1826 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1827 1828 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1829 1830 // Generate code for the induction step. Note that induction steps are 1831 // required to be loop-invariant 1832 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 1833 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 1834 "Induction step should be loop invariant"); 1835 if (PSE.getSE()->isSCEVable(IV->getType())) { 1836 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1837 return Exp.expandCodeFor(Step, Step->getType(), 1838 LoopVectorPreHeader->getTerminator()); 1839 } 1840 return cast<SCEVUnknown>(Step)->getValue(); 1841 }; 1842 1843 // The scalar value to broadcast. This is derived from the canonical 1844 // induction variable. If a truncation type is given, truncate the canonical 1845 // induction variable and step. Otherwise, derive these values from the 1846 // induction descriptor. 1847 auto CreateScalarIV = [&](Value *&Step) -> Value * { 1848 Value *ScalarIV = Induction; 1849 if (IV != OldInduction) { 1850 ScalarIV = IV->getType()->isIntegerTy() 1851 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1852 : Builder.CreateCast(Instruction::SIToFP, Induction, 1853 IV->getType()); 1854 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1855 ScalarIV->setName("offset.idx"); 1856 } 1857 if (Trunc) { 1858 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1859 assert(Step->getType()->isIntegerTy() && 1860 "Truncation requires an integer step"); 1861 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1862 Step = Builder.CreateTrunc(Step, TruncType); 1863 } 1864 return ScalarIV; 1865 }; 1866 1867 // Create the vector values from the scalar IV, in the absence of creating a 1868 // vector IV. 1869 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 1870 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1871 for (unsigned Part = 0; Part < UF; ++Part) { 1872 Value *EntryPart = 1873 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1874 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1875 if (Trunc) 1876 addMetadata(EntryPart, Trunc); 1877 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1878 } 1879 }; 1880 1881 // Now do the actual transformations, and start with creating the step value. 1882 Value *Step = CreateStepValue(ID.getStep()); 1883 if (VF <= 1) { 1884 Value *ScalarIV = CreateScalarIV(Step); 1885 CreateSplatIV(ScalarIV, Step); 1886 return; 1887 } 1888 1889 // Determine if we want a scalar version of the induction variable. 
This is 1890 // true if the induction variable itself is not widened, or if it has at 1891 // least one user in the loop that is not widened. 1892 auto NeedsScalarIV = needsScalarInduction(EntryVal); 1893 if (!NeedsScalarIV) { 1894 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1895 return; 1896 } 1897 1898 // Try to create a new independent vector induction variable. If we can't 1899 // create the phi node, we will splat the scalar induction variable in each 1900 // loop iteration. 1901 if (!shouldScalarizeInstruction(EntryVal)) { 1902 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1903 Value *ScalarIV = CreateScalarIV(Step); 1904 // Create scalar steps that can be used by instructions we will later 1905 // scalarize. Note that the addition of the scalar steps will not increase 1906 // the number of instructions in the loop in the common case prior to 1907 // InstCombine. We will be trading one vector extract for each scalar step. 1908 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1909 return; 1910 } 1911 1912 // If we haven't yet vectorized the induction variable, splat the scalar 1913 // induction variable, and build the necessary step vectors. 1914 // TODO: Don't do it unless the vectorized IV is really required. 1915 Value *ScalarIV = CreateScalarIV(Step); 1916 CreateSplatIV(ScalarIV, Step); 1917 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1918 } 1919 1920 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1921 Instruction::BinaryOps BinOp) { 1922 // Create and check the types. 1923 auto *ValVTy = cast<VectorType>(Val->getType()); 1924 int VLen = ValVTy->getNumElements(); 1925 1926 Type *STy = Val->getType()->getScalarType(); 1927 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1928 "Induction Step must be an integer or FP"); 1929 assert(Step->getType() == STy && "Step has wrong type"); 1930 1931 SmallVector<Constant *, 8> Indices; 1932 1933 if (STy->isIntegerTy()) { 1934 // Create a vector of consecutive numbers from zero to VF. 1935 for (int i = 0; i < VLen; ++i) 1936 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1937 1938 // Add the consecutive indices to the vector value. 1939 Constant *Cv = ConstantVector::get(Indices); 1940 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1941 Step = Builder.CreateVectorSplat(VLen, Step); 1942 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1943 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1944 // which can be found from the original scalar operations. 1945 Step = Builder.CreateMul(Cv, Step); 1946 return Builder.CreateAdd(Val, Step, "induction"); 1947 } 1948 1949 // Floating point induction. 1950 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1951 "Binary Opcode should be specified for FP induction"); 1952 // Create a vector of consecutive numbers from zero to VF. 1953 for (int i = 0; i < VLen; ++i) 1954 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1955 1956 // Add the consecutive indices to the vector value. 1957 Constant *Cv = ConstantVector::get(Indices); 1958 1959 Step = Builder.CreateVectorSplat(VLen, Step); 1960 1961 // Floating point operations had to be 'fast' to enable the induction. 
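// For a hypothetical float IV with VF = 4 and StartIdx = 0, this produces
// (names illustrative):
//
//   %mul = fmul fast <4 x float>
//            <float 0.0, float 1.0, float 2.0, float 3.0>, %step.splat
//   %induction = fadd fast <4 x float> %val, %mul
//
// i.e. lane L of the result is Val[L] + (StartIdx + L) * Step, with the
// 'fast' flags applied unconditionally by the code below.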
1962 FastMathFlags Flags; 1963 Flags.setFast(); 1964 1965 Value *MulOp = Builder.CreateFMul(Cv, Step); 1966 if (isa<Instruction>(MulOp)) 1967 // Have to check, MulOp may be a constant 1968 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1969 1970 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1971 if (isa<Instruction>(BOp)) 1972 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1973 return BOp; 1974 } 1975 1976 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1977 Instruction *EntryVal, 1978 const InductionDescriptor &ID) { 1979 // We shouldn't have to build scalar steps if we aren't vectorizing. 1980 assert(VF > 1 && "VF should be greater than one"); 1981 1982 // Get the value type and ensure it and the step have the same integer type. 1983 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1984 assert(ScalarIVTy == Step->getType() && 1985 "Val and Step should have the same type"); 1986 1987 // We build scalar steps for both integer and floating-point induction 1988 // variables. Here, we determine the kind of arithmetic we will perform. 1989 Instruction::BinaryOps AddOp; 1990 Instruction::BinaryOps MulOp; 1991 if (ScalarIVTy->isIntegerTy()) { 1992 AddOp = Instruction::Add; 1993 MulOp = Instruction::Mul; 1994 } else { 1995 AddOp = ID.getInductionOpcode(); 1996 MulOp = Instruction::FMul; 1997 } 1998 1999 // Determine the number of scalars we need to generate for each unroll 2000 // iteration. If EntryVal is uniform, we only need to generate the first 2001 // lane. Otherwise, we generate all VF values. 2002 unsigned Lanes = 2003 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 2004 : VF; 2005 // Compute the scalar steps and save the results in VectorLoopValueMap. 2006 for (unsigned Part = 0; Part < UF; ++Part) { 2007 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2008 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2009 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2010 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2011 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2012 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2013 } 2014 } 2015 } 2016 2017 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2018 assert(V != Induction && "The new induction variable should not be used."); 2019 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2020 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2021 2022 // If we have a stride that is replaced by one, do it here. Defer this for 2023 // the VPlan-native path until we start running Legal checks in that path. 2024 if (!EnableVPlanNativePath && Legal->hasStride(V)) 2025 V = ConstantInt::get(V->getType(), 1); 2026 2027 // If we have a vector mapped to this value, return it. 2028 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2029 return VectorLoopValueMap.getVectorValue(V, Part); 2030 2031 // If the value has not been vectorized, check if it has been scalarized 2032 // instead. If it has been scalarized, and we actually need the value in 2033 // vector form, we will construct the vector values on demand. 2034 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2035 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2036 2037 // If we've scalarized a value, that value should be an instruction. 
2038 auto *I = cast<Instruction>(V); 2039 2040 // If we aren't vectorizing, we can just copy the scalar map values over to 2041 // the vector map. 2042 if (VF == 1) { 2043 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2044 return ScalarValue; 2045 } 2046 2047 // Get the last scalar instruction we generated for V and Part. If the value 2048 // is known to be uniform after vectorization, this corresponds to lane zero 2049 // of the Part unroll iteration. Otherwise, the last instruction is the one 2050 // we created for the last vector lane of the Part unroll iteration. 2051 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2052 auto *LastInst = cast<Instruction>( 2053 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2054 2055 // Set the insert point after the last scalarized instruction. This ensures 2056 // the insertelement sequence will directly follow the scalar definitions. 2057 auto OldIP = Builder.saveIP(); 2058 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2059 Builder.SetInsertPoint(&*NewIP); 2060 2061 // However, if we are vectorizing, we need to construct the vector values. 2062 // If the value is known to be uniform after vectorization, we can just 2063 // broadcast the scalar value corresponding to lane zero for each unroll 2064 // iteration. Otherwise, we construct the vector values using insertelement 2065 // instructions. Since the resulting vectors are stored in 2066 // VectorLoopValueMap, we will only generate the insertelements once. 2067 Value *VectorValue = nullptr; 2068 if (Cost->isUniformAfterVectorization(I, VF)) { 2069 VectorValue = getBroadcastInstrs(ScalarValue); 2070 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2071 } else { 2072 // Initialize packing with insertelements to start from undef. 2073 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 2074 VectorLoopValueMap.setVectorValue(V, Part, Undef); 2075 for (unsigned Lane = 0; Lane < VF; ++Lane) 2076 packScalarIntoVectorValue(V, {Part, Lane}); 2077 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2078 } 2079 Builder.restoreIP(OldIP); 2080 return VectorValue; 2081 } 2082 2083 // If this scalar is unknown, assume that it is a constant or that it is 2084 // loop invariant. Broadcast V and save the value for future uses. 2085 Value *B = getBroadcastInstrs(V); 2086 VectorLoopValueMap.setVectorValue(V, Part, B); 2087 return B; 2088 } 2089 2090 Value * 2091 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2092 const VPIteration &Instance) { 2093 // If the value is not an instruction contained in the loop, it should 2094 // already be scalar. 2095 if (OrigLoop->isLoopInvariant(V)) 2096 return V; 2097 2098 assert((Instance.Lane == 0 || 2099 !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)) && 2100 "Uniform values only have lane zero"); 2101 2102 // If the value from the original loop has not been vectorized, it is 2103 // represented by UF x VF scalar values in the new loop. Return the requested 2104 // scalar value. 2105 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2106 return VectorLoopValueMap.getScalarValue(V, Instance); 2107 2108 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2109 // for the given unroll part. If this entry is not a vector type (i.e., the 2110 // vectorization factor is one), there is no need to generate an 2111 // extractelement instruction.
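// For example (names illustrative): requesting lane 2 of part 0 of a
// value that was widened to <4 x i32> emits
//
//   %lane2 = extractelement <4 x i32> %widened.part0, i32 2
//
// whereas with VF == 1 the "vector" entry is returned unchanged, since it
// is already a scalar.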
2112 auto *U = getOrCreateVectorValue(V, Instance.Part); 2113 if (!U->getType()->isVectorTy()) { 2114 assert(VF == 1 && "Value not scalarized has non-vector type"); 2115 return U; 2116 } 2117 2118 // Otherwise, the value from the original loop has been vectorized and is 2119 // represented by UF vector values. Extract and return the requested scalar 2120 // value from the appropriate vector lane. 2121 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2122 } 2123 2124 void InnerLoopVectorizer::packScalarIntoVectorValue( 2125 Value *V, const VPIteration &Instance) { 2126 assert(V != Induction && "The new induction variable should not be used."); 2127 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2128 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2129 2130 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2131 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2132 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2133 Builder.getInt32(Instance.Lane)); 2134 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2135 } 2136 2137 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2138 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2139 SmallVector<int, 8> ShuffleMask; 2140 for (unsigned i = 0; i < VF; ++i) 2141 ShuffleMask.push_back(VF - i - 1); 2142 2143 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2144 ShuffleMask, "reverse"); 2145 } 2146 2147 // Return whether we allow using masked interleave-groups (for dealing with 2148 // strided loads/stores that reside in predicated blocks, or for dealing 2149 // with gaps). 2150 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2151 // If an override option has been passed in for interleaved accesses, use it. 2152 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2153 return EnableMaskedInterleavedMemAccesses; 2154 2155 return TTI.enableMaskedInterleavedAccessVectorization(); 2156 } 2157 2158 // Try to vectorize the interleave group that \p Instr belongs to. 2159 // 2160 // E.g. Translate following interleaved load group (factor = 3): 2161 // for (i = 0; i < N; i+=3) { 2162 // R = Pic[i]; // Member of index 0 2163 // G = Pic[i+1]; // Member of index 1 2164 // B = Pic[i+2]; // Member of index 2 2165 // ... // do something to R, G, B 2166 // } 2167 // To: 2168 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2169 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2170 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2171 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2172 // 2173 // Or translate following interleaved store group (factor = 3): 2174 // for (i = 0; i < N; i+=3) { 2175 // ... 
do something to R, G, B 2176 // Pic[i] = R; // Member of index 0 2177 // Pic[i+1] = G; // Member of index 1 2178 // Pic[i+2] = B; // Member of index 2 2179 // } 2180 // To: 2181 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2182 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2183 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2184 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2185 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2186 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2187 const InterleaveGroup<Instruction> *Group, VPTransformState &State, 2188 VPValue *Addr, VPValue *BlockInMask) { 2189 Instruction *Instr = Group->getInsertPos(); 2190 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2191 2192 // Prepare for the vector type of the interleaved load/store. 2193 Type *ScalarTy = getMemInstValueType(Instr); 2194 unsigned InterleaveFactor = Group->getFactor(); 2195 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2196 2197 // Prepare for the new pointers. 2198 SmallVector<Value *, 2> AddrParts; 2199 unsigned Index = Group->getIndex(Instr); 2200 2201 // TODO: extend the masked interleaved-group support to reversed access. 2202 assert((!BlockInMask || !Group->isReverse()) && 2203 "Reversed masked interleave-group not supported."); 2204 2205 // If the group is reverse, adjust the index to refer to the last vector lane 2206 // instead of the first. We adjust the index from the first vector lane, 2207 // rather than directly getting the pointer for lane VF - 1, because the 2208 // pointer operand of the interleaved access is supposed to be uniform. For 2209 // uniform instructions, we're only required to generate a value for the 2210 // first vector lane in each unroll iteration. 2211 if (Group->isReverse()) 2212 Index += (VF - 1) * Group->getFactor(); 2213 2214 for (unsigned Part = 0; Part < UF; Part++) { 2215 Value *AddrPart = State.get(Addr, {Part, 0}); 2216 setDebugLocFromInst(Builder, AddrPart); 2217 2218 // Notice that the current instruction could be at any index. We need to 2219 // adjust the address to the member of index 0. 2220 // 2221 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2222 // b = A[i]; // Member of index 0 2223 // The current pointer points to A[i+1]; adjust it to A[i]. 2224 // 2225 // E.g. A[i+1] = a; // Member of index 1 2226 // A[i] = b; // Member of index 0 2227 // A[i+2] = c; // Member of index 2 (Current instruction) 2228 // The current pointer points to A[i+2]; adjust it to A[i]. 2229 2230 bool InBounds = false; 2231 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2232 InBounds = gep->isInBounds(); 2233 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2234 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2235 2236 // Cast to the vector pointer type. 2237 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2238 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2239 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2240 } 2241 2242 setDebugLocFromInst(Builder, Instr); 2243 Value *UndefVec = UndefValue::get(VecTy); 2244 2245 Value *MaskForGaps = nullptr; 2246 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2247 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2248 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2249 } 2250 2251 // Vectorize the interleaved load group.
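// A sketch of the group-mask logic below, for factor 3 and VF = 4 (values
// illustrative): the per-lane block mask <m0, m1, m2, m3> is replicated
// once per member,
//
//   %interleaved.mask = <m0, m0, m0, m1, m1, m1, m2, m2, m2, m3, m3, m3>
//
// and, when the group also has gaps, AND'ed with the gap mask so that the
// missing members read as inactive lanes of the wide masked load.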
2252 if (isa<LoadInst>(Instr)) { 2253 // For each unroll part, create a wide load for the group. 2254 SmallVector<Value *, 2> NewLoads; 2255 for (unsigned Part = 0; Part < UF; Part++) { 2256 Instruction *NewLoad; 2257 if (BlockInMask || MaskForGaps) { 2258 assert(useMaskedInterleavedAccesses(*TTI) && 2259 "masked interleaved groups are not allowed."); 2260 Value *GroupMask = MaskForGaps; 2261 if (BlockInMask) { 2262 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2263 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2264 Value *ShuffledMask = Builder.CreateShuffleVector( 2265 BlockInMaskPart, Undefs, 2266 createReplicatedMask(InterleaveFactor, VF), "interleaved.mask"); 2267 GroupMask = MaskForGaps 2268 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2269 MaskForGaps) 2270 : ShuffledMask; 2271 } 2272 NewLoad = 2273 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2274 GroupMask, UndefVec, "wide.masked.vec"); 2275 } 2276 else 2277 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2278 Group->getAlign(), "wide.vec"); 2279 Group->addMetadata(NewLoad); 2280 NewLoads.push_back(NewLoad); 2281 } 2282 2283 // For each member in the group, shuffle out the appropriate data from the 2284 // wide loads. 2285 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2286 Instruction *Member = Group->getMember(I); 2287 2288 // Skip the gaps in the group. 2289 if (!Member) 2290 continue; 2291 2292 auto StrideMask = createStrideMask(I, InterleaveFactor, VF); 2293 for (unsigned Part = 0; Part < UF; Part++) { 2294 Value *StridedVec = Builder.CreateShuffleVector( 2295 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2296 2297 // If this member has different type, cast the result type. 2298 if (Member->getType() != ScalarTy) { 2299 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2300 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2301 } 2302 2303 if (Group->isReverse()) 2304 StridedVec = reverseVector(StridedVec); 2305 2306 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2307 } 2308 } 2309 return; 2310 } 2311 2312 // The sub vector type for current instruction. 2313 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2314 2315 // Vectorize the interleaved store group. 2316 for (unsigned Part = 0; Part < UF; Part++) { 2317 // Collect the stored vector from each member. 2318 SmallVector<Value *, 4> StoredVecs; 2319 for (unsigned i = 0; i < InterleaveFactor; i++) { 2320 // Interleaved store group doesn't allow a gap, so each index has a member 2321 Instruction *Member = Group->getMember(i); 2322 assert(Member && "Fail to get a member from an interleaved store group"); 2323 2324 Value *StoredVec = getOrCreateVectorValue( 2325 cast<StoreInst>(Member)->getValueOperand(), Part); 2326 if (Group->isReverse()) 2327 StoredVec = reverseVector(StoredVec); 2328 2329 // If this member has different type, cast it to a unified type. 2330 2331 if (StoredVec->getType() != SubVT) 2332 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2333 2334 StoredVecs.push_back(StoredVec); 2335 } 2336 2337 // Concatenate all vectors into a wide vector. 2338 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2339 2340 // Interleave the elements in the wide vector. 
2341 Value *IVec = Builder.CreateShuffleVector( 2342 WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor), 2343 "interleaved.vec"); 2344 2345 Instruction *NewStoreInstr; 2346 if (BlockInMask) { 2347 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2348 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2349 Value *ShuffledMask = Builder.CreateShuffleVector( 2350 BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF), 2351 "interleaved.mask"); 2352 NewStoreInstr = Builder.CreateMaskedStore( 2353 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2354 } 2355 else 2356 NewStoreInstr = 2357 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2358 2359 Group->addMetadata(NewStoreInstr); 2360 } 2361 } 2362 2363 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2364 VPTransformState &State, 2365 VPValue *Addr, 2366 VPValue *StoredValue, 2367 VPValue *BlockInMask) { 2368 // Attempt to issue a wide load. 2369 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2370 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2371 2372 assert((LI || SI) && "Invalid Load/Store instruction"); 2373 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2374 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2375 2376 LoopVectorizationCostModel::InstWidening Decision = 2377 Cost->getWideningDecision(Instr, VF); 2378 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2379 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2380 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2381 "CM decision is not to widen the memory instruction"); 2382 2383 Type *ScalarDataTy = getMemInstValueType(Instr); 2384 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2385 // An alignment of 0 means target abi alignment. We need to use the scalar's 2386 // target abi alignment in such a case. 2387 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2388 const Align Alignment = 2389 DL.getValueOrABITypeAlignment(getLoadStoreAlignment(Instr), ScalarDataTy); 2390 2391 // Determine if the pointer operand of the access is either consecutive or 2392 // reverse consecutive. 2393 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2394 bool ConsecutiveStride = 2395 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2396 bool CreateGatherScatter = 2397 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2398 2399 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2400 // gather/scatter. Otherwise Decision should have been to Scalarize. 2401 assert((ConsecutiveStride || CreateGatherScatter) && 2402 "The instruction should be scalarized"); 2403 (void)ConsecutiveStride; 2404 2405 VectorParts BlockInMaskParts(UF); 2406 bool isMaskRequired = BlockInMask; 2407 if (isMaskRequired) 2408 for (unsigned Part = 0; Part < UF; ++Part) 2409 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2410 2411 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2412 // Calculate the pointer for the specific unroll-part. 2413 GetElementPtrInst *PartPtr = nullptr; 2414 2415 bool InBounds = false; 2416 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2417 InBounds = gep->isInBounds(); 2418 2419 if (Reverse) { 2420 // If the address is consecutive but reversed, then the 2421 // wide store needs to start at the last vector element. 
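// Worked example (VF = 4, values illustrative): the part-Part vector of a
// reversed access covers the original iterations
//
//   i - 4*Part - 3 ... i - 4*Part
//
// so the address is first stepped back by -Part * VF elements and then by
// 1 - VF more to reach the lowest-addressed element; the loaded or stored
// vector is then reversed to restore iteration order.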
2422 PartPtr = cast<GetElementPtrInst>( 2423 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2424 PartPtr->setIsInBounds(InBounds); 2425 PartPtr = cast<GetElementPtrInst>( 2426 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2427 PartPtr->setIsInBounds(InBounds); 2428 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2429 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2430 } else { 2431 PartPtr = cast<GetElementPtrInst>( 2432 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2433 PartPtr->setIsInBounds(InBounds); 2434 } 2435 2436 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2437 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2438 }; 2439 2440 // Handle Stores: 2441 if (SI) { 2442 setDebugLocFromInst(Builder, SI); 2443 2444 for (unsigned Part = 0; Part < UF; ++Part) { 2445 Instruction *NewSI = nullptr; 2446 Value *StoredVal = State.get(StoredValue, Part); 2447 if (CreateGatherScatter) { 2448 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2449 Value *VectorGep = State.get(Addr, Part); 2450 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2451 MaskPart); 2452 } else { 2453 if (Reverse) { 2454 // If we store to reverse consecutive memory locations, then we need 2455 // to reverse the order of elements in the stored value. 2456 StoredVal = reverseVector(StoredVal); 2457 // We don't want to update the value in the map as it might be used in 2458 // another expression. So don't call resetVectorValue(StoredVal). 2459 } 2460 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2461 if (isMaskRequired) 2462 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2463 BlockInMaskParts[Part]); 2464 else 2465 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2466 } 2467 addMetadata(NewSI, SI); 2468 } 2469 return; 2470 } 2471 2472 // Handle loads. 2473 assert(LI && "Must have a load instruction"); 2474 setDebugLocFromInst(Builder, LI); 2475 for (unsigned Part = 0; Part < UF; ++Part) { 2476 Value *NewLI; 2477 if (CreateGatherScatter) { 2478 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2479 Value *VectorGep = State.get(Addr, Part); 2480 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2481 nullptr, "wide.masked.gather"); 2482 addMetadata(NewLI, LI); 2483 } else { 2484 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2485 if (isMaskRequired) 2486 NewLI = Builder.CreateMaskedLoad( 2487 VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy), 2488 "wide.masked.load"); 2489 else 2490 NewLI = 2491 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2492 2493 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2494 addMetadata(NewLI, LI); 2495 if (Reverse) 2496 NewLI = reverseVector(NewLI); 2497 } 2498 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2499 } 2500 } 2501 2502 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2503 const VPIteration &Instance, 2504 bool IfPredicateInstr) { 2505 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2506 2507 setDebugLocFromInst(Builder, Instr); 2508 2509 // Does this instruction return a value ? 
2510 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2511 2512 Instruction *Cloned = Instr->clone(); 2513 if (!IsVoidRetTy) 2514 Cloned->setName(Instr->getName() + ".cloned"); 2515 2516 // Replace the operands of the cloned instruction with their scalar 2517 // equivalents in the new loop. 2518 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2519 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2520 Cloned->setOperand(op, NewOp); 2521 } 2522 addNewMetadata(Cloned, Instr); 2523 2524 // Place the cloned scalar in the new loop. 2525 Builder.Insert(Cloned); 2526 2527 // Add the cloned scalar to the scalar map entry. 2528 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2529 2530 // If we just cloned a new assumption, add it to the assumption cache. 2531 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2532 if (II->getIntrinsicID() == Intrinsic::assume) 2533 AC->registerAssumption(II); 2534 2535 // End if-block. 2536 if (IfPredicateInstr) 2537 PredicatedInstructions.push_back(Cloned); 2538 } 2539 2540 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2541 Value *End, Value *Step, 2542 Instruction *DL) { 2543 BasicBlock *Header = L->getHeader(); 2544 BasicBlock *Latch = L->getLoopLatch(); 2545 // As we're just creating this loop, it's possible no latch exists 2546 // yet. If so, use the header as this will be a single block loop. 2547 if (!Latch) 2548 Latch = Header; 2549 2550 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2551 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2552 setDebugLocFromInst(Builder, OldInst); 2553 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2554 2555 Builder.SetInsertPoint(Latch->getTerminator()); 2556 setDebugLocFromInst(Builder, OldInst); 2557 2558 // Create i+1 and fill the PHINode. 2559 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2560 Induction->addIncoming(Start, L->getLoopPreheader()); 2561 Induction->addIncoming(Next, Latch); 2562 // Create the compare. 2563 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2564 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2565 2566 // Now we have two terminators. Remove the old one from the block. 2567 Latch->getTerminator()->eraseFromParent(); 2568 2569 return Induction; 2570 } 2571 2572 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2573 if (TripCount) 2574 return TripCount; 2575 2576 assert(L && "Create Trip Count for null loop."); 2577 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2578 // Find the loop boundaries. 2579 ScalarEvolution *SE = PSE.getSE(); 2580 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2581 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2582 "Invalid loop count"); 2583 2584 Type *IdxTy = Legal->getWidestInductionType(); 2585 assert(IdxTy && "No type for induction"); 2586 2587 // The exit count might have the type of i64 while the phi is i32. This can 2588 // happen if we have an induction variable that is sign extended before the 2589 // compare. The only way we can get a backedge-taken count here is if the 2590 // induction variable was signed, and as such it will not overflow. In such 2591 // a case truncation is legal.
2592 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2593 IdxTy->getPrimitiveSizeInBits()) 2594 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2595 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2596 2597 // Get the total trip count from the count by adding 1. 2598 const SCEV *ExitCount = SE->getAddExpr( 2599 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2600 2601 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2602 2603 // Expand the trip count and place the new instructions in the preheader. 2604 // Notice that the pre-header does not change, only the loop body. 2605 SCEVExpander Exp(*SE, DL, "induction"); 2606 2607 // Count holds the overall loop count (N). 2608 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2609 L->getLoopPreheader()->getTerminator()); 2610 2611 if (TripCount->getType()->isPointerTy()) 2612 TripCount = 2613 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2614 L->getLoopPreheader()->getTerminator()); 2615 2616 return TripCount; 2617 } 2618 2619 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2620 if (VectorTripCount) 2621 return VectorTripCount; 2622 2623 Value *TC = getOrCreateTripCount(L); 2624 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2625 2626 Type *Ty = TC->getType(); 2627 Constant *Step = ConstantInt::get(Ty, VF * UF); 2628 2629 // If the tail is to be folded by masking, round the number of iterations N 2630 // up to a multiple of Step instead of rounding down. This is done by first 2631 // adding Step-1 and then rounding down. Note that it's ok if this addition 2632 // overflows: the vector induction variable will eventually wrap to zero given 2633 // that it starts at zero and its Step is a power of two; the loop will then 2634 // exit, with the last early-exit vector comparison also producing all-true. 2635 if (Cost->foldTailByMasking()) { 2636 assert(isPowerOf2_32(VF * UF) && 2637 "VF*UF must be a power of 2 when folding tail by masking"); 2638 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2639 } 2640 2641 // Now we need to generate the expression for the part of the loop that the 2642 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2643 // iterations are not required for correctness, or N - Step, otherwise. Step 2644 // is equal to the vectorization factor (number of SIMD elements) times the 2645 // unroll factor (number of SIMD instructions). 2646 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2647 2648 // If there is a non-reversed interleaved group that may speculatively access 2649 // memory out-of-bounds, we need to ensure that there will be at least one 2650 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2651 // the trip count, we set the remainder to be equal to the step. If the step 2652 // does not evenly divide the trip count, no adjustment is necessary since 2653 // there will already be scalar iterations. Note that the minimum iterations 2654 // check ensures that N >= Step. 
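// Worked example (numbers illustrative): with a trip count N = 13 and
// Step = VF * UF = 8, R = 13 % 8 = 5, so n.vec = 8 and five iterations
// run in the scalar epilogue. If instead N = 16 and the interleave groups
// require a scalar epilogue, R would be 0, so the select below forces
// R = 8, giving n.vec = 8 and guaranteeing the epilogue runs at least
// once.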
2655 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2656 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2657 R = Builder.CreateSelect(IsZero, Step, R); 2658 } 2659 2660 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2661 2662 return VectorTripCount; 2663 } 2664 2665 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2666 const DataLayout &DL) { 2667 // Verify that V is a vector type with same number of elements as DstVTy. 2668 unsigned VF = DstVTy->getNumElements(); 2669 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2670 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2671 Type *SrcElemTy = SrcVecTy->getElementType(); 2672 Type *DstElemTy = DstVTy->getElementType(); 2673 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2674 "Vector elements must have same size"); 2675 2676 // Do a direct cast if element types are castable. 2677 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2678 return Builder.CreateBitOrPointerCast(V, DstVTy); 2679 } 2680 // V cannot be directly casted to desired vector type. 2681 // May happen when V is a floating point vector but DstVTy is a vector of 2682 // pointers or vice-versa. Handle this using a two-step bitcast using an 2683 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2684 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2685 "Only one type should be a pointer type"); 2686 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2687 "Only one type should be a floating point type"); 2688 Type *IntTy = 2689 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2690 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2691 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2692 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2693 } 2694 2695 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2696 BasicBlock *Bypass) { 2697 Value *Count = getOrCreateTripCount(L); 2698 // Reuse existing vector loop preheader for TC checks. 2699 // Note that new preheader block is generated for vector loop. 2700 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2701 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2702 2703 // Generate code to check if the loop's trip count is less than VF * UF, or 2704 // equal to it in case a scalar epilogue is required; this implies that the 2705 // vector trip count is zero. This check also covers the case where adding one 2706 // to the backedge-taken count overflowed leading to an incorrect trip count 2707 // of zero. In this case we will also jump to the scalar loop. 2708 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2709 : ICmpInst::ICMP_ULT; 2710 2711 // If tail is to be folded, vector loop takes care of all iterations. 2712 Value *CheckMinIters = Builder.getFalse(); 2713 if (!Cost->foldTailByMasking()) 2714 CheckMinIters = Builder.CreateICmp( 2715 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2716 "min.iters.check"); 2717 2718 // Create new preheader for vector loop. 2719 LoopVectorPreHeader = 2720 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2721 "vector.ph"); 2722 2723 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2724 DT->getNode(Bypass)->getIDom()) && 2725 "TC check is expected to dominate Bypass"); 2726 2727 // Update dominator for Bypass & LoopExit. 
  DT->changeImmediateDominator(Bypass, TCCheckBlock);
  DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
  LoopBypassBlocks.push_back(TCCheckBlock);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  // Reuse existing vector loop preheader for SCEV checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader;

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck = Exp.expandCodeForPredicate(
      &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  assert(!SCEVCheckBlock->getParent()->hasOptSize() &&
         "Cannot SCEV check stride or overflow when optimizing for size");

  SCEVCheckBlock->setName("vector.scevcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI,
                 nullptr, "vector.ph");

  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
  }

  ReplaceInstWithInst(
      SCEVCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck));
  LoopBypassBlocks.push_back(SCEVCheckBlock);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  // VPlan-native path does not do any analysis for runtime checks currently.
  if (EnableVPlanNativePath)
    return;

  // Reuse existing vector loop preheader for runtime memory checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const MemCheckBlock = L->getLoopPreheader();

  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
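  // Conceptually, each check compares the accessed address ranges of two
  // pointers and branches to the scalar loop if they may overlap, e.g.
  // (a sketch): Conflict = (A.Start < B.End) && (B.Start < A.End).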
  auto *LAI = Legal->getLAI();
  const auto &RtPtrChecking = *LAI->getRuntimePointerChecking();
  if (!RtPtrChecking.Need)
    return;
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      addRuntimeChecks(MemCheckBlock->getTerminator(), OrigLoop,
                       RtPtrChecking.getChecks(), RtPtrChecking.getSE());
  if (!MemRuntimeCheck)
    return;

  if (MemCheckBlock->getParent()->hasOptSize()) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  MemCheckBlock->setName("vector.memcheck");
  // Create new preheader for vector loop.
  LoopVectorPreHeader =
      SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr,
                 "vector.ph");

  // Update dominator only if this is the first RT check.
  if (LoopBypassBlocks.empty()) {
    DT->changeImmediateDominator(Bypass, MemCheckBlock);
    DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock);
  }

  ReplaceInstWithInst(
      MemCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck));
  LoopBypassBlocks.push_back(MemCheckBlock);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
                                          PSE.getSE());
  LVer->prepareNoAliasMetadata();
}

Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID) const {

  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType() == Step->getType() &&
         "Index type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle some trivial
  // cases only.
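  // The helpers below fold the trivial identities by hand (e.g. 0 + X ==> X
  // and 1 * X ==> X), since we cannot ask SCEV to simplify them for us here.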
2855 auto CreateAdd = [&B](Value *X, Value *Y) { 2856 assert(X->getType() == Y->getType() && "Types don't match!"); 2857 if (auto *CX = dyn_cast<ConstantInt>(X)) 2858 if (CX->isZero()) 2859 return Y; 2860 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2861 if (CY->isZero()) 2862 return X; 2863 return B.CreateAdd(X, Y); 2864 }; 2865 2866 auto CreateMul = [&B](Value *X, Value *Y) { 2867 assert(X->getType() == Y->getType() && "Types don't match!"); 2868 if (auto *CX = dyn_cast<ConstantInt>(X)) 2869 if (CX->isOne()) 2870 return Y; 2871 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2872 if (CY->isOne()) 2873 return X; 2874 return B.CreateMul(X, Y); 2875 }; 2876 2877 switch (ID.getKind()) { 2878 case InductionDescriptor::IK_IntInduction: { 2879 assert(Index->getType() == StartValue->getType() && 2880 "Index type does not match StartValue type"); 2881 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2882 return B.CreateSub(StartValue, Index); 2883 auto *Offset = CreateMul( 2884 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2885 return CreateAdd(StartValue, Offset); 2886 } 2887 case InductionDescriptor::IK_PtrInduction: { 2888 assert(isa<SCEVConstant>(Step) && 2889 "Expected constant step for pointer induction"); 2890 return B.CreateGEP( 2891 StartValue->getType()->getPointerElementType(), StartValue, 2892 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2893 &*B.GetInsertPoint()))); 2894 } 2895 case InductionDescriptor::IK_FpInduction: { 2896 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2897 auto InductionBinOp = ID.getInductionBinOp(); 2898 assert(InductionBinOp && 2899 (InductionBinOp->getOpcode() == Instruction::FAdd || 2900 InductionBinOp->getOpcode() == Instruction::FSub) && 2901 "Original bin op should be defined for FP induction"); 2902 2903 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 2904 2905 // Floating point operations had to be 'fast' to enable the induction. 2906 FastMathFlags Flags; 2907 Flags.setFast(); 2908 2909 Value *MulExp = B.CreateFMul(StepValue, Index); 2910 if (isa<Instruction>(MulExp)) 2911 // We have to check, the MulExp may be a constant. 2912 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2913 2914 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2915 "induction"); 2916 if (isa<Instruction>(BOp)) 2917 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2918 2919 return BOp; 2920 } 2921 case InductionDescriptor::IK_NoInduction: 2922 return nullptr; 2923 } 2924 llvm_unreachable("invalid enum"); 2925 } 2926 2927 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2928 /* 2929 In this function we generate a new loop. The new loop will contain 2930 the vectorized instructions while the old loop will continue to run the 2931 scalar remainder. 2932 2933 [ ] <-- loop iteration number check. 2934 / | 2935 / v 2936 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2937 | / | 2938 | / v 2939 || [ ] <-- vector pre header. 2940 |/ | 2941 | v 2942 | [ ] \ 2943 | [ ]_| <-- vector loop. 2944 | | 2945 | v 2946 | -[ ] <--- middle-block. 2947 | / | 2948 | / v 2949 -|- >[ ] <--- new preheader. 2950 | | 2951 | v 2952 | [ ] \ 2953 | [ ]_| <-- old scalar loop to handle remainder. 2954 \ | 2955 \ v 2956 >[ ] <-- exit block. 2957 ... 2958 */ 2959 2960 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2961 2962 // Some loops have a single integer induction variable, while other loops 2963 // don't. 
One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();

  // Split the single-block loop into the two-loop structure described above.
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  LoopExitBlock = OrigLoop->getExitBlock();
  assert(LoopExitBlock && "Must have an exit block");
  assert(LoopVectorPreHeader && "Invalid loop structure");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, "scalar.ph");
  // We intentionally don't let SplitBlock update LoopInfo since
  // LoopVectorBody should belong to a different loop than LoopVectorPreHeader.
  // LoopVectorBody is explicitly added to the correct place a few lines later.
  LoopVectorBody =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 nullptr, nullptr, "vector.body");

  // Update dominator for loop exit.
  DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(LoopVectorBody, *LI);

  // Find the loop boundaries.
  Value *Count = getOrCreateTripCount(Lp);

  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow,
  // leading to an incorrect trip count of zero. In this (rare) case we will
  // also jump to the scalar loop.
  emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
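  // E.g., with VF == 4 and UF == 2 the induction created below steps by 8, so
  // each iteration of the vector loop covers 8 iterations of the original
  // loop.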
3036 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3037 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3038 Induction = 3039 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3040 getDebugLocFromInstOrOperands(OldInduction)); 3041 3042 // We are going to resume the execution of the scalar loop. 3043 // Go over all of the induction variables that we found and fix the 3044 // PHIs that are left in the scalar version of the loop. 3045 // The starting values of PHI nodes depend on the counter of the last 3046 // iteration in the vectorized loop. 3047 // If we come from a bypass edge then we need to start from the original 3048 // start value. 3049 3050 // This variable saves the new starting index for the scalar loop. It is used 3051 // to test if there are any tail iterations left once the vector loop has 3052 // completed. 3053 for (auto &InductionEntry : Legal->getInductionVars()) { 3054 PHINode *OrigPhi = InductionEntry.first; 3055 InductionDescriptor II = InductionEntry.second; 3056 3057 // Create phi nodes to merge from the backedge-taken check block. 3058 PHINode *BCResumeVal = 3059 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3060 LoopScalarPreHeader->getTerminator()); 3061 // Copy original phi DL over to the new one. 3062 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3063 Value *&EndValue = IVEndValues[OrigPhi]; 3064 if (OrigPhi == OldInduction) { 3065 // We know what the end value is. 3066 EndValue = CountRoundDown; 3067 } else { 3068 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 3069 Type *StepType = II.getStep()->getType(); 3070 Instruction::CastOps CastOp = 3071 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3072 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3073 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3074 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3075 EndValue->setName("ind.end"); 3076 } 3077 3078 // The new PHI merges the original incoming value, in case of a bypass, 3079 // or the value at the end of the vectorized loop. 3080 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3081 3082 // Fix the scalar body counter (PHI node). 3083 // The old induction's phi node in the scalar body needs the truncated 3084 // value. 3085 for (BasicBlock *BB : LoopBypassBlocks) 3086 BCResumeVal->addIncoming(II.getStartValue(), BB); 3087 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3088 } 3089 3090 // We need the OrigLoop (scalar loop part) latch terminator to help 3091 // produce correct debug info for the middle block BB instructions. 3092 // The legality check stage guarantees that the loop will have a single 3093 // latch. 3094 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) && 3095 "Scalar loop latch terminator isn't a branch"); 3096 BranchInst *ScalarLatchBr = 3097 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()); 3098 3099 // Add a check in the middle block to see if we have completed 3100 // all of the iterations in the first vector loop. 3101 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3102 // If tail is to be folded, we know we don't need to run the remainder. 
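  // E.g. (assuming no scalar epilogue is required), for N == 12 and
  // VF * UF == 4 the vector loop covers every iteration, so CmpN is true and
  // we branch straight to the exit block; for N == 14 two iterations remain
  // and we branch to the scalar preheader.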
  Value *CmpN = Builder.getTrue();
  if (!Cost->foldTailByMasking()) {
    CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                           CountRoundDown, "cmp.n",
                           LoopMiddleBlock->getTerminator());

    // Here we use the same DebugLoc as the scalar loop latch branch instead
    // of the corresponding compare because they may have ended up with
    // different line numbers and we want to avoid awkward line stepping while
    // debugging. E.g., the compare may have a line number inside the loop.
    cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc());
  }

  BranchInst *BrInst =
      BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN);
  BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc());
  ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);

  // Get ready to start creating new instructions into the vectorized body.
  assert(LoopVectorPreHeader == Lp->getLoopPreheader() &&
         "Inconsistent vector loop preheader");
  Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());

  Optional<MDNode *> VectorizedLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupVectorized});
  if (VectorizedLoopID.hasValue()) {
    Lp->setLoopID(VectorizedLoopID.getValue());

    // Do not setAlreadyVectorized if loop attributes have been defined
    // explicitly.
    return LoopVectorPreHeader;
  }

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();

#ifdef EXPENSIVE_CHECKS
  assert(DT->verify(DominatorTree::VerificationLevel::Fast));
  LI->verify(*DT);
#endif

  return LoopVectorPreHeader;
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
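  // E.g. (a sketch), for an IV with Start == 0 and Step == 2 after CRD == 8
  // vector-covered iterations, the escaping penultimate value is
  // 0 + 2 * (8 - 1) == 14, while the last value is 16.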
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace

/// Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI,
                                                       unsigned VF,
                                                       bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate cost of scalarized vector call.
The source operands are assumed 3282 // to be vectors, so we need to extract individual elements from there, 3283 // execute VF scalar calls, and then gather the result into the vector return 3284 // value. 3285 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, 3286 TTI::TCK_RecipThroughput); 3287 if (VF == 1) 3288 return ScalarCallCost; 3289 3290 // Compute corresponding vector type for return value and arguments. 3291 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3292 for (Type *ScalarTy : ScalarTys) 3293 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3294 3295 // Compute costs of unpacking argument values for the scalar calls and 3296 // packing the return values to a vector. 3297 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF); 3298 3299 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3300 3301 // If we can't emit a vector call for this function, then the currently found 3302 // cost is the cost we need to return. 3303 NeedToScalarize = true; 3304 VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/); 3305 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3306 3307 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3308 return Cost; 3309 3310 // If the corresponding vector cost is cheaper, return its cost. 3311 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, 3312 TTI::TCK_RecipThroughput); 3313 if (VectorCallCost < Cost) { 3314 NeedToScalarize = false; 3315 return VectorCallCost; 3316 } 3317 return Cost; 3318 } 3319 3320 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3321 unsigned VF) { 3322 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3323 assert(ID && "Expected intrinsic call!"); 3324 3325 FastMathFlags FMF; 3326 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3327 FMF = FPMO->getFastMathFlags(); 3328 3329 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3330 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF, 3331 TargetTransformInfo::TCK_RecipThroughput, 3332 CI); 3333 } 3334 3335 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3336 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3337 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3338 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3339 } 3340 3341 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3342 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3343 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3344 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3345 } 3346 3347 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3348 // For every instruction `I` in MinBWs, truncate the operands, create a 3349 // truncated version of `I` and reextend its result. InstCombine runs 3350 // later and will remove any ext/trunc pairs. 3351 SmallPtrSet<Value *, 4> Erased; 3352 for (const auto &KV : Cost->getMinimalBitwidths()) { 3353 // If the value wasn't vectorized, we must maintain the original scalar 3354 // type. The absence of the value from VectorLoopValueMap indicates that it 3355 // wasn't vectorized. 
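    // As an illustration, for a minimal bitwidth of 8 the rewrite below turns
    // (a sketch):
    //   %a = add <4 x i32> %x, %y
    // into
    //   %x.tr = trunc <4 x i32> %x to <4 x i8>
    //   %y.tr = trunc <4 x i32> %y to <4 x i8>
    //   %a.tr = add <4 x i8> %x.tr, %y.tr
    //   %a    = zext <4 x i8> %a.tr to <4 x i32>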
3356 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3357 continue; 3358 for (unsigned Part = 0; Part < UF; ++Part) { 3359 Value *I = getOrCreateVectorValue(KV.first, Part); 3360 if (Erased.find(I) != Erased.end() || I->use_empty() || 3361 !isa<Instruction>(I)) 3362 continue; 3363 Type *OriginalTy = I->getType(); 3364 Type *ScalarTruncatedTy = 3365 IntegerType::get(OriginalTy->getContext(), KV.second); 3366 Type *TruncatedTy = VectorType::get( 3367 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements()); 3368 if (TruncatedTy == OriginalTy) 3369 continue; 3370 3371 IRBuilder<> B(cast<Instruction>(I)); 3372 auto ShrinkOperand = [&](Value *V) -> Value * { 3373 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3374 if (ZI->getSrcTy() == TruncatedTy) 3375 return ZI->getOperand(0); 3376 return B.CreateZExtOrTrunc(V, TruncatedTy); 3377 }; 3378 3379 // The actual instruction modification depends on the instruction type, 3380 // unfortunately. 3381 Value *NewI = nullptr; 3382 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3383 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3384 ShrinkOperand(BO->getOperand(1))); 3385 3386 // Any wrapping introduced by shrinking this operation shouldn't be 3387 // considered undefined behavior. So, we can't unconditionally copy 3388 // arithmetic wrapping flags to NewI. 3389 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3390 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3391 NewI = 3392 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3393 ShrinkOperand(CI->getOperand(1))); 3394 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3395 NewI = B.CreateSelect(SI->getCondition(), 3396 ShrinkOperand(SI->getTrueValue()), 3397 ShrinkOperand(SI->getFalseValue())); 3398 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3399 switch (CI->getOpcode()) { 3400 default: 3401 llvm_unreachable("Unhandled cast!"); 3402 case Instruction::Trunc: 3403 NewI = ShrinkOperand(CI->getOperand(0)); 3404 break; 3405 case Instruction::SExt: 3406 NewI = B.CreateSExtOrTrunc( 3407 CI->getOperand(0), 3408 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3409 break; 3410 case Instruction::ZExt: 3411 NewI = B.CreateZExtOrTrunc( 3412 CI->getOperand(0), 3413 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3414 break; 3415 } 3416 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3417 auto Elements0 = 3418 cast<VectorType>(SI->getOperand(0)->getType())->getNumElements(); 3419 auto *O0 = B.CreateZExtOrTrunc( 3420 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3421 auto Elements1 = 3422 cast<VectorType>(SI->getOperand(1)->getType())->getNumElements(); 3423 auto *O1 = B.CreateZExtOrTrunc( 3424 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3425 3426 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3427 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3428 // Don't do anything with the operands, just extend the result. 
3429 continue; 3430 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3431 auto Elements = 3432 cast<VectorType>(IE->getOperand(0)->getType())->getNumElements(); 3433 auto *O0 = B.CreateZExtOrTrunc( 3434 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3435 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3436 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3437 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3438 auto Elements = 3439 cast<VectorType>(EE->getOperand(0)->getType())->getNumElements(); 3440 auto *O0 = B.CreateZExtOrTrunc( 3441 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3442 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3443 } else { 3444 // If we don't know what to do, be conservative and don't do anything. 3445 continue; 3446 } 3447 3448 // Lastly, extend the result. 3449 NewI->takeName(cast<Instruction>(I)); 3450 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3451 I->replaceAllUsesWith(Res); 3452 cast<Instruction>(I)->eraseFromParent(); 3453 Erased.insert(I); 3454 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3455 } 3456 } 3457 3458 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3459 for (const auto &KV : Cost->getMinimalBitwidths()) { 3460 // If the value wasn't vectorized, we must maintain the original scalar 3461 // type. The absence of the value from VectorLoopValueMap indicates that it 3462 // wasn't vectorized. 3463 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3464 continue; 3465 for (unsigned Part = 0; Part < UF; ++Part) { 3466 Value *I = getOrCreateVectorValue(KV.first, Part); 3467 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3468 if (Inst && Inst->use_empty()) { 3469 Value *NewI = Inst->getOperand(0); 3470 Inst->eraseFromParent(); 3471 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI); 3472 } 3473 } 3474 } 3475 } 3476 3477 void InnerLoopVectorizer::fixVectorizedLoop() { 3478 // Insert truncates and extends for any truncated instructions as hints to 3479 // InstCombine. 3480 if (VF > 1) 3481 truncateToMinimalBitwidths(); 3482 3483 // Fix widened non-induction PHIs by setting up the PHI operands. 3484 if (OrigPHIsToFix.size()) { 3485 assert(EnableVPlanNativePath && 3486 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 3487 fixNonInductionPHIs(); 3488 } 3489 3490 // At this point every instruction in the original loop is widened to a 3491 // vector form. Now we need to fix the recurrences in the loop. These PHI 3492 // nodes are currently empty because we did not want to introduce cycles. 3493 // This is the second stage of vectorizing recurrences. 3494 fixCrossIterationPHIs(); 3495 3496 // Forget the original basic block. 3497 PSE.getSE()->forgetLoop(OrigLoop); 3498 3499 // Fix-up external users of the induction variables. 3500 for (auto &Entry : Legal->getInductionVars()) 3501 fixupIVUsers(Entry.first, Entry.second, 3502 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 3503 IVEndValues[Entry.first], LoopMiddleBlock); 3504 3505 fixLCSSAPHIs(); 3506 for (Instruction *PI : PredicatedInstructions) 3507 sinkScalarOperands(&*PI); 3508 3509 // Remove redundant induction instructions. 3510 cse(LoopVectorBody); 3511 3512 // Set/update profile weights for the vector and remainder loops as original 3513 // loop iterations are now distributed among them. Note that original loop 3514 // represented by LoopScalarBody becomes remainder loop after vectorization. 
  //
  // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
  // end up with a slightly less precise result, but that should be OK since
  // the profile is not inherently precise anyway. Note also that a possible
  // bypass of the vector code caused by legality checks is ignored,
  // optimistically assigning all the weight to the vector loop.
  setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
                               LI->getLoopFor(LoopVectorBody),
                               LI->getLoopFor(LoopScalarBody), VF * UF);
}

void InnerLoopVectorizer::fixCrossIterationPHIs() {
  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #2: We now need to fix the recurrences by adding incoming edges to
  // the currently empty PHI nodes. At this point every instruction in the
  // original loop is widened to a vector form so we can use them to construct
  // the incoming edges.
  for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
    // Handle first-order recurrences and reductions that need to be fixed.
    if (Legal->isFirstOrderRecurrence(&Phi))
      fixFirstOrderRecurrence(&Phi);
    else if (Legal->isReductionVariable(&Phi))
      fixReduction(&Phi);
  }
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop finishes executing, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
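  // E.g., for VF == 4 this emits (a sketch, for an i32 recurrence):
  //   %vector.recur.init = insertelement <4 x i32> undef, i32 %s_init, i32 3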
3601 auto *VectorInit = ScalarInit; 3602 if (VF > 1) { 3603 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 3604 VectorInit = Builder.CreateInsertElement( 3605 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit, 3606 Builder.getInt32(VF - 1), "vector.recur.init"); 3607 } 3608 3609 // We constructed a temporary phi node in the first phase of vectorization. 3610 // This phi node will eventually be deleted. 3611 Builder.SetInsertPoint( 3612 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0))); 3613 3614 // Create a phi node for the new recurrence. The current value will either be 3615 // the initial value inserted into a vector or loop-varying vector value. 3616 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur"); 3617 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader); 3618 3619 // Get the vectorized previous value of the last part UF - 1. It appears last 3620 // among all unrolled iterations, due to the order of their construction. 3621 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1); 3622 3623 // Find and set the insertion point after the previous value if it is an 3624 // instruction. 3625 BasicBlock::iterator InsertPt; 3626 // Note that the previous value may have been constant-folded so it is not 3627 // guaranteed to be an instruction in the vector loop. 3628 // FIXME: Loop invariant values do not form recurrences. We should deal with 3629 // them earlier. 3630 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 3631 InsertPt = LoopVectorBody->getFirstInsertionPt(); 3632 else { 3633 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 3634 if (isa<PHINode>(PreviousLastPart)) 3635 // If the previous value is a phi node, we should insert after all the phi 3636 // nodes in the block containing the PHI to avoid breaking basic block 3637 // verification. Note that the basic block may be different to 3638 // LoopVectorBody, in case we predicate the loop. 3639 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 3640 else 3641 InsertPt = ++PreviousInst->getIterator(); 3642 } 3643 Builder.SetInsertPoint(&*InsertPt); 3644 3645 // We will construct a vector for the recurrence by combining the values for 3646 // the current and previous iterations. This is the required shuffle mask. 3647 SmallVector<int, 8> ShuffleMask(VF); 3648 ShuffleMask[0] = VF - 1; 3649 for (unsigned I = 1; I < VF; ++I) 3650 ShuffleMask[I] = I + VF - 1; 3651 3652 // The vector from which to take the initial value for the current iteration 3653 // (actual or unrolled). Initially, this is the vector phi node. 3654 Value *Incoming = VecPhi; 3655 3656 // Shuffle the current and previous vector and update the vector parts. 3657 for (unsigned Part = 0; Part < UF; ++Part) { 3658 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3659 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3660 auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3661 ShuffleMask) 3662 : Incoming; 3663 PhiPart->replaceAllUsesWith(Shuffle); 3664 cast<Instruction>(PhiPart)->eraseFromParent(); 3665 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3666 Incoming = PreviousPart; 3667 } 3668 3669 // Fix the latch value of the new recurrence in the vector loop. 3670 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3671 3672 // Extract the last vector element in the middle block. 
This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *ExtractForScalar = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    ExtractForScalar = Builder.CreateExtractElement(
        ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
  }
  // Extract the second-to-last element in the middle block if the
  // Phi is used outside the loop. We need to extract the phi itself
  // and not the last element (the phi update in the current iteration). This
  // will be the value when jumping to the exit block from the LoopMiddleBlock,
  // when the scalar loop is not run at all.
  Value *ExtractForPhiUsedOutsideLoop = nullptr;
  if (VF > 1)
    ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
        Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
  // When the loop is unrolled without vectorizing, initialize
  // ExtractForPhiUsedOutsideLoop with the unrolled value just prior to
  // `Incoming`. This is analogous to the vectorized case above: extracting the
  // second-to-last element when VF > 1.
  else if (UF > 1)
    ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find all the phi nodes for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
  for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
    if (LCSSAPhi.getIncomingValue(0) == Phi) {
      LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
  Constant *Zero = Builder.getInt32(0);

  // Get its reduction variable descriptor.
  assert(Legal->isReductionVariable(Phi) &&
         "Unable to find the reduction variable");
  RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];

  RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
  TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
  Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
  RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
      RdxDesc.getMinMaxRecurrenceKind();
  setDebugLocFromInst(Builder, ReductionStartValue);

  // We need to generate a reduction vector from the incoming scalar.
  // To do so, we need to generate the 'identity' vector and override
  // one of the elements with the incoming scalar reduction. We need
  // to do it in the vector-loop preheader.
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // This is the vector-clone of the value that leaves the loop.
  Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();

  // Find the reduction identity variable. Zero for addition, or and xor; one
  // for multiplication; -1 for and.
  Value *Identity;
  Value *VectorStart;
  if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
      RK == RecurrenceDescriptor::RK_FloatMinMax) {
    // MinMax reductions have the start value as their identity.
    if (VF == 1) {
      VectorStart = Identity = ReductionStartValue;
    } else {
      VectorStart = Identity =
          Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
    }
  } else {
    // Handle other reduction kinds:
    Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
        RK, VecTy->getScalarType());
    if (VF == 1) {
      Identity = Iden;
      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart = ReductionStartValue;
    } else {
      Identity = ConstantVector::getSplat({VF, false}, Iden);

      // This vector is the Identity vector where the first element is the
      // incoming scalar reduction.
      VectorStart =
          Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
    }
  }

  // Wrap flags are in general invalid after vectorization; clear them.
  clearReductionWrapFlags(RdxDesc);

  // Fix the vector-loop phi.

  // Reductions do not have to start at zero. They can start with
  // any loop-invariant value.
  BasicBlock *Latch = OrigLoop->getLoopLatch();
  Value *LoopVal = Phi->getIncomingValueForBlock(Latch);

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
    Value *Val = getOrCreateVectorValue(LoopVal, Part);
    // Make sure to add the reduction start value only to the
    // first unroll part.
    Value *StartVal = (Part == 0) ? VectorStart : Identity;
    cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
    cast<PHINode>(VecRdxPhi)
        ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
  }

  // Before each round, move the insertion point right between
  // the PHIs and the values we are going to write.
  // This allows us to write both PHINodes and the extractelement
  // instructions.
  Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

  setDebugLocFromInst(Builder, LoopExitInst);

  // If the tail is folded by masking, the vector value to leave the loop
  // should be a Select choosing between the vectorized LoopExitInst and the
  // vectorized Phi, instead of the former.
  if (Cost->foldTailByMasking()) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VecLoopExitInst =
          VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
      Value *Sel = nullptr;
      for (User *U : VecLoopExitInst->users()) {
        if (isa<SelectInst>(U)) {
          assert(!Sel && "Reduction exit feeding two selects");
          Sel = U;
        } else
          assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
      }
      assert(Sel && "Reduction exit feeds no select");
      VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel);
    }
  }

  // If the vector reduction can be performed in a smaller type, we truncate
  // then extend the loop exit value to enable InstCombine to evaluate the
  // entire expression in the smaller type.
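  // E.g., an i32 add reduction whose live values fit in i8 is truncated to
  // <VF x i8> here; the final reduced scalar is extended back to i32 only
  // after the horizontal reduction below.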
3827 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3828 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3829 Builder.SetInsertPoint( 3830 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3831 VectorParts RdxParts(UF); 3832 for (unsigned Part = 0; Part < UF; ++Part) { 3833 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3834 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3835 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3836 : Builder.CreateZExt(Trunc, VecTy); 3837 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3838 UI != RdxParts[Part]->user_end();) 3839 if (*UI != Trunc) { 3840 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3841 RdxParts[Part] = Extnd; 3842 } else { 3843 ++UI; 3844 } 3845 } 3846 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3847 for (unsigned Part = 0; Part < UF; ++Part) { 3848 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3849 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3850 } 3851 } 3852 3853 // Reduce all of the unrolled parts into a single vector. 3854 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3855 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3856 3857 // The middle block terminator has already been assigned a DebugLoc here (the 3858 // OrigLoop's single latch terminator). We want the whole middle block to 3859 // appear to execute on this line because: (a) it is all compiler generated, 3860 // (b) these instructions are always executed after evaluating the latch 3861 // conditional branch, and (c) other passes may add new predecessors which 3862 // terminate on this line. This is the easiest way to ensure we don't 3863 // accidentally cause an extra step back into the loop while debugging. 3864 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 3865 for (unsigned Part = 1; Part < UF; ++Part) { 3866 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3867 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3868 // Floating point operations had to be 'fast' to enable the reduction. 3869 ReducedPartRdx = addFastMathFlag( 3870 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3871 ReducedPartRdx, "bin.rdx"), 3872 RdxDesc.getFastMathFlags()); 3873 else 3874 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3875 RdxPart); 3876 } 3877 3878 if (VF > 1) { 3879 bool NoNaN = Legal->hasFunNoNaNAttr(); 3880 ReducedPartRdx = 3881 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3882 // If the reduction can be performed in a smaller type, we need to extend 3883 // the reduction to the wider type before we branch to the original loop. 3884 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3885 ReducedPartRdx = 3886 RdxDesc.isSigned() 3887 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3888 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3889 } 3890 3891 // Create a phi node that merges control-flow from the backedge-taken check 3892 // block and the middle block. 
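  // A sketch of the resulting merge (assuming a single bypass block):
  //   %bc.merge.rdx = phi [ %start, %bypass ], [ %rdx, %middle.block ]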
3893 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3894 LoopScalarPreHeader->getTerminator()); 3895 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3896 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3897 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3898 3899 // Now, we need to fix the users of the reduction variable 3900 // inside and outside of the scalar remainder loop. 3901 // We know that the loop is in LCSSA form. We need to update the 3902 // PHI nodes in the exit blocks. 3903 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3904 // All PHINodes need to have a single entry edge, or two if 3905 // we already fixed them. 3906 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3907 3908 // We found a reduction value exit-PHI. Update it with the 3909 // incoming bypass edge. 3910 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3911 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3912 } // end of the LCSSA phi scan. 3913 3914 // Fix the scalar loop reduction variable with the incoming reduction sum 3915 // from the vector body and from the backedge value. 3916 int IncomingEdgeBlockIdx = 3917 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3918 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3919 // Pick the other block. 3920 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); 3921 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3922 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3923 } 3924 3925 void InnerLoopVectorizer::clearReductionWrapFlags( 3926 RecurrenceDescriptor &RdxDesc) { 3927 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind(); 3928 if (RK != RecurrenceDescriptor::RK_IntegerAdd && 3929 RK != RecurrenceDescriptor::RK_IntegerMult) 3930 return; 3931 3932 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 3933 assert(LoopExitInstr && "null loop exit instruction"); 3934 SmallVector<Instruction *, 8> Worklist; 3935 SmallPtrSet<Instruction *, 8> Visited; 3936 Worklist.push_back(LoopExitInstr); 3937 Visited.insert(LoopExitInstr); 3938 3939 while (!Worklist.empty()) { 3940 Instruction *Cur = Worklist.pop_back_val(); 3941 if (isa<OverflowingBinaryOperator>(Cur)) 3942 for (unsigned Part = 0; Part < UF; ++Part) { 3943 Value *V = getOrCreateVectorValue(Cur, Part); 3944 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 3945 } 3946 3947 for (User *U : Cur->users()) { 3948 Instruction *UI = cast<Instruction>(U); 3949 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 3950 Visited.insert(UI).second) 3951 Worklist.push_back(UI); 3952 } 3953 } 3954 } 3955 3956 void InnerLoopVectorizer::fixLCSSAPHIs() { 3957 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3958 if (LCSSAPhi.getNumIncomingValues() == 1) { 3959 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3960 // Non-instruction incoming values will have only one value. 3961 unsigned LastLane = 0; 3962 if (isa<Instruction>(IncomingValue)) 3963 LastLane = Cost->isUniformAfterVectorization( 3964 cast<Instruction>(IncomingValue), VF) 3965 ? 0 3966 : VF - 1; 3967 // Can be a loop invariant incoming value or the last scalar value to be 3968 // extracted from the vectorized loop. 
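      // E.g., a value that is uniform after vectorization is read from lane 0,
      // while a genuinely varying value is read from lane VF - 1 of the last
      // unroll part.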
      Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
      Value *lastIncomingValue =
          getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane });
      LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
    }
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know if we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one
  // pass through the worklist fails to sink a single instruction.
  bool Changed;
  do {
    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
4036 Changed = true; 4037 } 4038 } while (Changed); 4039 } 4040 4041 void InnerLoopVectorizer::fixNonInductionPHIs() { 4042 for (PHINode *OrigPhi : OrigPHIsToFix) { 4043 PHINode *NewPhi = 4044 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 4045 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 4046 4047 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 4048 predecessors(OrigPhi->getParent())); 4049 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 4050 predecessors(NewPhi->getParent())); 4051 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 4052 "Scalar and Vector BB should have the same number of predecessors"); 4053 4054 // The insertion point in Builder may be invalidated by the time we get 4055 // here. Force the Builder insertion point to something valid so that we do 4056 // not run into issues during insertion point restore in 4057 // getOrCreateVectorValue calls below. 4058 Builder.SetInsertPoint(NewPhi); 4059 4060 // The predecessor order is preserved and we can rely on mapping between 4061 // scalar and vector block predecessors. 4062 for (unsigned i = 0; i < NumIncomingValues; ++i) { 4063 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 4064 4065 // When looking up the new scalar/vector values to fix up, use incoming 4066 // values from original phi. 4067 Value *ScIncV = 4068 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 4069 4070 // Scalar incoming value may need a broadcast 4071 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 4072 NewPhi->addIncoming(NewIncV, NewPredBB); 4073 } 4074 } 4075 } 4076 4077 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, unsigned UF, 4078 unsigned VF, bool IsPtrLoopInvariant, 4079 SmallBitVector &IsIndexLoopInvariant) { 4080 // Construct a vector GEP by widening the operands of the scalar GEP as 4081 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4082 // results in a vector of pointers when at least one operand of the GEP 4083 // is vector-typed. Thus, to keep the representation compact, we only use 4084 // vector-typed operands for loop-varying values. 4085 4086 if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4087 // If we are vectorizing, but the GEP has only loop-invariant operands, 4088 // the GEP we build (by only using vector-typed operands for 4089 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4090 // produce a vector of pointers, we need to either arbitrarily pick an 4091 // operand to broadcast, or broadcast a clone of the original GEP. 4092 // Here, we broadcast a clone of the original. 4093 // 4094 // TODO: If at some point we decide to scalarize instructions having 4095 // loop-invariant operands, this special case will no longer be 4096 // required. We would add the scalarization decision to 4097 // collectLoopScalars() and teach getVectorValue() to broadcast 4098 // the lane-zero scalar value. 4099 auto *Clone = Builder.Insert(GEP->clone()); 4100 for (unsigned Part = 0; Part < UF; ++Part) { 4101 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4102 VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart); 4103 addMetadata(EntryPart, GEP); 4104 } 4105 } else { 4106 // If the GEP has at least one loop-varying operand, we are sure to 4107 // produce a vector of pointers. But if we are only unrolling, we want 4108 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4109 // produce with the code below will be scalar (if VF == 1) or vector 4110 // (otherwise). 
    // values in the vector mapping with initVector, as we do for other
    // instructions.
    for (unsigned Part = 0; Part < UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = IsPtrLoopInvariant
                      ? GEP->getPointerOperand()
                      : getOrCreateVectorValue(GEP->getPointerOperand(), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (auto Index : enumerate(GEP->indices())) {
        Value *User = Index.value().get();
        if (IsIndexLoopInvariant[Index.index()])
          Indices.push_back(User);
        else
          Indices.push_back(getOrCreateVectorValue(User, Part));
      }

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
      auto *NewGEP =
          GEP->isInBounds()
              ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
                                          Indices)
              : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
      assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
      addMetadata(NewGEP, GEP);
    }
  }
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF) {
  PHINode *P = cast<PHINode>(PN);
  if (EnableVPlanNativePath) {
    // Currently we enter here in the VPlan-native path for non-induction
    // PHIs where all control flow is uniform. We simply widen these PHIs.
    // Create a vector phi with no operands - the vector phi operands will be
    // set at the end of vector code generation.
    Type *VecTy =
        (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
    Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
    VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
    OrigPHIsToFix.push_back(P);

    return;
  }

  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  // In order to support recurrences, we need to be able to vectorize Phi
  // nodes. Phi nodes have cycles, so we need to vectorize them in two stages.
  // This is stage #1: We create a new vector PHI node with no incoming edges.
  // We'll use this value when we vectorize all of the instructions that use
  // the PHI.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
    }
    return;
  }

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars().count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars().lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw
  // flags, which can be found from the original scalar operations.
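  // As an illustrative sketch (hypothetical IR, assuming VF == 4, UF == 1 and
  // a non-uniform i8* pointer induction), the IK_PtrInduction case below
  // emits one scalar GEP per lane rather than a single vector GEP:
  //   %next.gep   = getelementptr i8, i8* %start, i64 %index
  //   %next.gep.1 = getelementptr i8, i8* %start, i64 %index.plus.1
  //   ...
  // where each index is the normalized induction plus the lane number, as
  // computed via emitTransformedIndex().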
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
    for (unsigned Part = 0; Part < UF; ++Part) {
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep =
            emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
        SclrGep->setName("next.gep");
        VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
      }
    }
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
///       Non-zero divisors that are not compile-time constants will not be
///       converted into multiplication, so we will still end up scalarizing
///       the division, but can do so w/o predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
                                           VPTransformState &State) {
  switch (I.getOpcode()) {
  case Instruction::Call:
  case Instruction::Br:
  case Instruction::PHI:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::FNeg:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen unops and binops.
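    // For illustration only (hypothetical IR): with VF == 4, a scalar
    //   %add = add nsw i32 %x, %y
    // becomes, for each unroll part, a single wide operation such as
    //   %vec.add = add nsw <4 x i32> %vec.x, %vec.y
    // with the nsw flag carried over by copyIRFlags() below.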
    setDebugLocFromInst(Builder, &I);

    for (unsigned Part = 0; Part < UF; ++Part) {
      SmallVector<Value *, 2> Ops;
      for (VPValue *VPOp : User.operands())
        Ops.push_back(State.get(VPOp, Part));

      Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);

      if (auto *VecOp = dyn_cast<Instruction>(V))
        VecOp->copyIRFlags(&I);

      // Use this vector value for all users of the original instruction.
      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, &I);
    }

    break;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = cast<CmpInst>(&I);
    setDebugLocFromInst(Builder, Cmp);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *B = State.get(User.getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        Builder.setFastMathFlags(Cmp->getFastMathFlags());
        C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
      }
      VectorLoopValueMap.setVectorValue(&I, Part, C);
      addMetadata(C, &I);
    }

    break;
  }

  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto *CI = cast<CastInst>(&I);
    setDebugLocFromInst(Builder, CI);

    // Vectorize casts.
    Type *DestTy =
        (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
      VectorLoopValueMap.setVectorValue(&I, Part, Cast);
      addMetadata(Cast, &I);
    }
    break;
  }
  default:
    // This instruction is not vectorized by simple widening.
    LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
                                               VPTransformState &State) {
  assert(!isa<DbgInfoIntrinsic>(I) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  setDebugLocFromInst(Builder, &I);

  Module *M = I.getParent()->getParent()->getParent();
  auto *CI = cast<CallInst>(&I);

  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->arg_operands())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

  // The flag indicates whether we use an intrinsic or a regular call for the
  // vectorized version of the instruction.
  // Is it beneficial to perform the intrinsic call compared to the lib call?
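  // For example (illustrative): a call to llvm.sqrt.f32 at VF == 4 may be
  // widened either to the intrinsic llvm.sqrt.v4f32 or to a vectorized
  // library function known to the VFDatabase; the cost comparison below picks
  // the cheaper of the two.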
  bool NeedToScalarize = false;
  unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
  bool UseVectorIntrinsic =
      ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
  assert((UseVectorIntrinsic || !NeedToScalarize) &&
         "Instruction should be scalarized elsewhere.");

  for (unsigned Part = 0; Part < UF; ++Part) {
    SmallVector<Value *, 4> Args;
    for (auto &I : enumerate(ArgOperands.operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
        Arg = State.get(I.value(), Part);
      else
        Arg = State.get(I.value(), {0, 0});
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseVectorIntrinsic) {
      // Use vector version of the intrinsic.
      Type *TysForDecl[] = {CI->getType()};
      if (VF > 1)
        TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
      VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
      // Use vector version of the function call.
      const VFShape Shape =
          VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
#ifndef NDEBUG
      assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
             "Can't create vector function.");
#endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    VectorLoopValueMap.setVectorValue(&I, Part, V);
    addMetadata(V, &I);
  }
}

void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
                                                 bool InvariantCond) {
  setDebugLocFromInst(Builder, &I);

  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // Instcombine will make this a no-op.

  auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
    Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
    Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
    Value *Sel =
        Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
    VectorLoopValueMap.setVectorValue(&I, Part, Sel);
    addMetadata(Sel, &I);
  }
}

void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use
  // will be a scalar use, and the pointer is only used by memory accesses, we
  // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast and
  // getelementptr instructions used by memory accesses requiring a scalar use,
  // and (3) pointer induction variables and their update instructions (we
  // currently only scalarize these).
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
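  // For example (illustrative IR): given
  //   %gep = getelementptr inbounds i32, i32* %a, i64 %iv
  //   store i32 %v, i32* %gep
  // if the store will be scalarized or is a consecutive (non-gather/scatter)
  // access, the use of %gep is a scalar use; %gep is then seeded into
  // ScalarPtrs provided all of its users are loads or stores, and into
  // PossibleNonScalarPtrs otherwise.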
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // (3) Add to the worklist all pointer induction variables and their update
  // instructions.
  //
  // TODO: Once we are able to vectorize pointer induction variables we should
  // no longer insert them into the worklist here.
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
    if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
      continue;
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (ForcedScalar != ForcedScalars.end())
    for (auto *I : ForcedScalar->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // We already considered pointer induction variables, so there's no reason
    // to look at their users again.
    //
    // TODO: Once we are able to vectorize pointer induction variables we
    // should no longer skip over them here.
    if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
      continue;

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
                                                         unsigned VF) {
  if (!blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
  case Instruction::Store: {
    if (!Legal->isMaskRequired(I))
      return false;
    auto *Ptr = getLoadStorePointerOperand(I);
    auto *Ty = getMemInstValueType(I);
    // We have already decided how to vectorize this instruction, get that
    // result.
    if (VF > 1) {
      InstWidening WideningDecision = getWideningDecision(I, VF);
      assert(WideningDecision != CM_Unknown &&
             "Widening decision should be ready at this moment");
      return WideningDecision == CM_Scalarize;
    }
    const MaybeAlign Alignment = getLoadStoreAlignment(I);
    return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
                                isLegalMaskedGather(Ty, Alignment))
                            : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
                                isLegalMaskedScatter(Ty, Alignment));
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
                                                               unsigned VF) {
  assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  assert(getWideningDecision(I, VF) == CM_Unknown &&
         "Decision should not be set yet.");
  auto *Group = getInterleavedAccessGroup(I);
  assert(Group && "Must have a group.");

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = getMemInstValueType(I);
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  // Check if masking is required.
  // A Group may need masking for one of two reasons: it resides in a block
  // that needs predication, or it was decided to use masking to deal with
  // gaps.
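  // For example (illustrative): a group for the accesses a[2*i] and a[2*i+1]
  // inside an if-converted block needs a masked wide load/store, while a
  // group with a gap (say, only a[2*i] out of a stride-2 pair) needs masking
  // to avoid accessing memory past the last full group when no scalar
  // epilogue is allowed.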
  bool PredicatedAccessRequiresMasking =
      Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
  bool AccessWithGapsRequiresMasking =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
    return true;

  // If masked interleaving is required, we expect that the user/target had
  // enabled it, because otherwise it either wouldn't have been created or
  // it should have been invalidated by the CostModel.
  assert(useMaskedInterleavedAccesses(TTI) &&
         "Masked interleave-groups for predicated accesses are not enabled.");

  auto *Ty = getMemInstValueType(I);
  const MaybeAlign Alignment = getLoadStoreAlignment(I);
  return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
                          : TTI.isLegalMaskedStore(Ty, Alignment);
}

bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
                                                               unsigned VF) {
  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  auto *Ptr = getLoadStorePointerOperand(I);

  // First of all, in order to be widened the pointer must be consecutive.
  if (!Legal->isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. If we find no uniform value, we won't
  // analyze again. Uniforms.count(VF) will return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Instructions that are scalar with predication must not be considered
  // uniform after vectorization, because that would create an erroneous
  // replicating region where only a single instance out of VF should be
  // formed.
  // TODO: optimize such seldom cases if found important, see PR40816.
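  // For example (illustrative): a conditional udiv that is scalar with
  // predication must be replicated once per lane inside its predicated
  // block; marking it uniform would instead emit a single instance for all
  // VF lanes, producing exactly the erroneous region described above.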
  auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
    if (isScalarWithPredication(I, VF)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
                        << *I << "\n");
      return;
    }
    LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
    Worklist.insert(I);
  };

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
    addToWorklistIfAllowed(Cmp);

  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  // are pointers that are treated like consecutive pointers during
  // vectorization. The pointer operands of interleaved accesses are an
  // example.
  SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;

  // Holds pointer operands of instructions that are possibly non-uniform.
  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;

  auto isUniformDecision = [&](Instruction *I, unsigned VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };
  // Iterate over the instructions in the loop, and collect all
  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  // that a consecutive-like pointer operand will be scalarized, we collect it
  // in PossibleNonUniformPtrs instead. We use two sets here because a single
  // getelementptr instruction can be used by both vectorized and scalarized
  // memory instructions. For example, if a loop loads and stores from the same
  // location, but the store is conditional, the store will be scalarized, and
  // the getelementptr won't remain uniform.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (!Ptr)
        continue;

      // True if all users of Ptr are memory accesses that have Ptr as their
      // pointer operand.
      auto UsersAreMemAccesses =
          llvm::all_of(Ptr->users(), [&](User *U) -> bool {
            return getLoadStorePointerOperand(U) == Ptr;
          });

      // Ensure the memory instruction will not be scalarized or used by
      // gather/scatter, making its pointer operand non-uniform. If the pointer
      // operand is used by any instruction other than a memory access, we
      // conservatively assume the pointer operand may be non-uniform.
      if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
        PossibleNonUniformPtrs.insert(Ptr);

      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like, or interleaving - the pointer operand should
      // remain uniform.
      else
        ConsecutiveLikePtrs.insert(Ptr);
    }

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.
  for (auto *V : ConsecutiveLikePtrs)
    if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end())
      addToWorklistIfAllowed(V);

  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should already be inside the Worklist. This ensures a
  // uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First order recurrence Phis should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) ||
                   (OI == getLoadStorePointerOperand(J) &&
                    isUniformDecision(J, VF));
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to
    // be dynamically uniform if the target can skip it.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return None;
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC);
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return None;
    break;
  }

  // Now try the tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  unsigned MaxVF = computeFeasibleMaxVF(TC);
  if (TC > 0 && TC % MaxVF == 0) {
    // Accept MaxVF if we do not have a tail.
    LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
    return MaxVF;
  }

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxVF;
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return None;
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return None;
}

unsigned
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();

  WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);

  unsigned MaxVectorSize = WidestRegister / WidestType;

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << WidestRegister << " bits.\n");

  assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
                                 " into one vector!");
  if (MaxVectorSize == 0) {
    LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
    return MaxVectorSize;
  } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
             isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to be the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below.
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                      << ConstTripCount << "\n");
    MaxVectorSize = ConstTripCount;
    return MaxVectorSize;
  }

  unsigned MaxVF = MaxVectorSize;
  if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorSize).
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than
    // existing ones.
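    // Worked example (illustrative numbers): with a 256-bit widest register,
    // WidestType == i32 and SmallestType == i8, the default MaxVectorSize is
    // 256/32 == 8, and the loop above also queued the candidate VFs 16 and 32
    // (up to 256/8 == 32). The scan below then picks the largest of these
    // whose estimated register usage still fits the target's register file.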
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
      if (MaxVF < MinVF) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
  float Cost = expectedCost(1).first;
  const float ScalarCost = Cost;
  unsigned Width = 1;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && MaxVF > 1) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    Cost = std::numeric_limits<float>::max();
  }

  for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    Width = 1;
    Cost = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store
      //        will be vectorized. We only know this for certain after a VF
      //        has been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we can't
  // predict at this level. For example, frontend pressure (on decode or
  // fetch) due to code size, or the number and capabilities of the execution
  // ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // We used the distance for the interleave count.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small known or estimated trip
  // count.
  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want power-of-two interleave counts to simplify any
  // addressing operations or alignment considerations.
  // We also want power-of-two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero when the tail is folded by
  // masking; this currently happens when OptForSize, in which case IC is set
  // to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF == 1) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF.
  if (BestKnownTC) {
    MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
  }

  // If we did not calculate the cost for VF (because the user selected the
  // VF) then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  assert(LoopCost && "Non-zero loop cost expected");

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && !Legal->getReductionVars().empty()) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead
  // and potentially expose ILP opportunities.
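  // Worked example (illustrative numbers): with SmallLoopCost == 20 and an
  // estimated LoopCost == 8, the small-loop path below computes
  // SmallIC == min(IC, PowerOf2Floor(20 / 8)) == min(IC, 2), keeping the
  // per-iteration loop overhead near the 5% target mentioned below.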
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit it, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
    if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = !Legal->getReductionVars().empty();
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps an index to its instruction.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instructions that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and
        // continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (Ends.find(I) == Ends.end())
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
      continue;

    // For each VF find the maximum usage of registers.
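    // Worked example (illustrative numbers): for an open interval holding an
    // i32 value, with VFs[j] == 8 and a 128-bit widest register, GetRegUsage
    // returns max(1, 8 * 32 / 128) == 2 vector registers for that value.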
5501 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5502 // Count the number of live intervals. 5503 SmallMapVector<unsigned, unsigned, 4> RegUsage; 5504 5505 if (VFs[j] == 1) { 5506 for (auto Inst : OpenIntervals) { 5507 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5508 if (RegUsage.find(ClassID) == RegUsage.end()) 5509 RegUsage[ClassID] = 1; 5510 else 5511 RegUsage[ClassID] += 1; 5512 } 5513 } else { 5514 collectUniformsAndScalars(VFs[j]); 5515 for (auto Inst : OpenIntervals) { 5516 // Skip ignored values for VF > 1. 5517 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end()) 5518 continue; 5519 if (isScalarAfterVectorization(Inst, VFs[j])) { 5520 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5521 if (RegUsage.find(ClassID) == RegUsage.end()) 5522 RegUsage[ClassID] = 1; 5523 else 5524 RegUsage[ClassID] += 1; 5525 } else { 5526 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 5527 if (RegUsage.find(ClassID) == RegUsage.end()) 5528 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 5529 else 5530 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 5531 } 5532 } 5533 } 5534 5535 for (auto& pair : RegUsage) { 5536 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 5537 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 5538 else 5539 MaxUsages[j][pair.first] = pair.second; 5540 } 5541 } 5542 5543 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5544 << OpenIntervals.size() << '\n'); 5545 5546 // Add the current instruction to the list of open intervals. 5547 OpenIntervals.insert(I); 5548 } 5549 5550 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5551 SmallMapVector<unsigned, unsigned, 4> Invariant; 5552 5553 for (auto Inst : LoopInvariants) { 5554 unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 5555 unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType()); 5556 if (Invariant.find(ClassID) == Invariant.end()) 5557 Invariant[ClassID] = Usage; 5558 else 5559 Invariant[ClassID] += Usage; 5560 } 5561 5562 LLVM_DEBUG({ 5563 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 5564 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 5565 << " item\n"; 5566 for (const auto &pair : MaxUsages[i]) { 5567 dbgs() << "LV(REG): RegisterClass: " 5568 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5569 << " registers\n"; 5570 } 5571 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 5572 << " item\n"; 5573 for (const auto &pair : Invariant) { 5574 dbgs() << "LV(REG): RegisterClass: " 5575 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5576 << " registers\n"; 5577 } 5578 }); 5579 5580 RU.LoopInvariantRegs = Invariant; 5581 RU.MaxLocalUsers = MaxUsages[i]; 5582 RUs[i] = RU; 5583 } 5584 5585 return RUs; 5586 } 5587 5588 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5589 // TODO: Cost model for emulated masked load/store is completely 5590 // broken. This hack guides the cost model to use an artificially 5591 // high enough value to practically disable vectorization with such 5592 // operations, except where previously deployed legality hack allowed 5593 // using very low cost values. This is to avoid regressions coming simply 5594 // from moving "masked load/store" check from legality to cost model. 5595 // Masked Load/Gather emulation was previously never allowed. 5596 // Limited number of Masked Store/Scatter emulation was allowed. 
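// For example (hypothetical): on a target without native masked loads, a
// load under "if (cond[i])" is always routed through this hack, whereas a
// predicated store is only routed through it once NumPredStores exceeds the
// NumberOfStoresToPredicate threshold, as checked below.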
  assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop
  // and determine whether it would be better not to if-convert the blocks
  // they are in. If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
        if (!useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
        PredicatedBBsAfterVectorization.insert(BB);
      }
  }
}

int LoopVectorizationCostModel::computePredInstDiscount(
    Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
    unsigned VF) {
  assert(!isUniformAfterVectorization(PredInst, VF) &&
         "Instruction marked uniform-after-vectorization will be predicated");

  // Initialize the discount to zero, meaning that the scalar version and the
  // vector version cost the same.
  int Discount = 0;

  // Holds instructions to analyze. The instructions we visit are mapped in
  // ScalarCosts. Those instructions are the ones that would be scalarized if
  // we find that the scalar version costs less.
  SmallVector<Instruction *, 8> Worklist;

  // Returns true if the given instruction can be scalarized.
  auto canBeScalarized = [&](Instruction *I) -> bool {
    // We only attempt to scalarize instructions forming a single-use chain
    // from the original predicated block that would otherwise be vectorized.
    // Although not strictly necessary, we give up on instructions we know
    // will already be scalar to avoid traversing chains that are unlikely to
    // be beneficial.
    if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
        isScalarAfterVectorization(I, VF))
      return false;

    // If the instruction is scalar with predication, it will be analyzed
    // separately. We ignore it within the context of PredInst.
    if (isScalarWithPredication(I))
      return false;

    // If any of the instruction's operands are uniform after vectorization,
    // the instruction cannot be scalarized. This prevents, for example, a
    // masked load from being scalarized.
5670 // 5671 // We assume we will only emit a value for lane zero of an instruction 5672 // marked uniform after vectorization, rather than VF identical values. 5673 // Thus, if we scalarize an instruction that uses a uniform, we would 5674 // create uses of values corresponding to the lanes we aren't emitting code 5675 // for. This behavior can be changed by allowing getScalarValue to clone 5676 // the lane zero values for uniforms rather than asserting. 5677 for (Use &U : I->operands()) 5678 if (auto *J = dyn_cast<Instruction>(U.get())) 5679 if (isUniformAfterVectorization(J, VF)) 5680 return false; 5681 5682 // Otherwise, we can scalarize the instruction. 5683 return true; 5684 }; 5685 5686 // Compute the expected cost discount from scalarizing the entire expression 5687 // feeding the predicated instruction. We currently only consider expressions 5688 // that are single-use instruction chains. 5689 Worklist.push_back(PredInst); 5690 while (!Worklist.empty()) { 5691 Instruction *I = Worklist.pop_back_val(); 5692 5693 // If we've already analyzed the instruction, there's nothing to do. 5694 if (ScalarCosts.find(I) != ScalarCosts.end()) 5695 continue; 5696 5697 // Compute the cost of the vector instruction. Note that this cost already 5698 // includes the scalarization overhead of the predicated instruction. 5699 unsigned VectorCost = getInstructionCost(I, VF).first; 5700 5701 // Compute the cost of the scalarized instruction. This cost is the cost of 5702 // the instruction as if it wasn't if-converted and instead remained in the 5703 // predicated block. We will scale this cost by block probability after 5704 // computing the scalarization overhead. 5705 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5706 5707 // Compute the scalarization overhead of needed insertelement instructions 5708 // and phi nodes. 5709 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5710 ScalarCost += TTI.getScalarizationOverhead( 5711 cast<VectorType>(ToVectorTy(I->getType(), VF)), 5712 APInt::getAllOnesValue(VF), true, false); 5713 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5714 } 5715 5716 // Compute the scalarization overhead of needed extractelement 5717 // instructions. For each of the instruction's operands, if the operand can 5718 // be scalarized, add it to the worklist; otherwise, account for the 5719 // overhead. 5720 for (Use &U : I->operands()) 5721 if (auto *J = dyn_cast<Instruction>(U.get())) { 5722 assert(VectorType::isValidElementType(J->getType()) && 5723 "Instruction has non-scalar type"); 5724 if (canBeScalarized(J)) 5725 Worklist.push_back(J); 5726 else if (needsExtract(J, VF)) 5727 ScalarCost += TTI.getScalarizationOverhead( 5728 cast<VectorType>(ToVectorTy(J->getType(), VF)), 5729 APInt::getAllOnesValue(VF), false, true); 5730 } 5731 5732 // Scale the total scalar cost by block probability. 5733 ScalarCost /= getReciprocalPredBlockProb(); 5734 5735 // Compute the discount. A non-negative discount means the vector version 5736 // of the instruction costs more, and scalarizing would be beneficial. 5737 Discount += VectorCost - ScalarCost; 5738 ScalarCosts[I] = ScalarCost; 5739 } 5740 5741 return Discount; 5742 } 5743 5744 LoopVectorizationCostModel::VectorizationCostTy 5745 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5746 VectorizationCostTy Cost; 5747 5748 // For each block. 5749 for (BasicBlock *BB : TheLoop->blocks()) { 5750 VectorizationCostTy BlockCost; 5751 5752 // For each instruction in the old loop. 
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
          (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = ForceTargetInstructionCost;

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always
    // execute the predicated block. Thus, scale the block's cost by the
    // probability of executing it.
    if (VF == 1 && blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// Gets the address access SCEV after verifying that the access pattern is
/// loop invariant except for the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr,
    LoopVectorizationLegality *Legal,
    PredicatedScalarEvolution &PSE,
    const Loop *TheLoop) {

  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
  return PSE.getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

unsigned
LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                        unsigned VF) {
  assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
  Type *ValTy = getMemInstValueType(I);
  auto SE = PSE.getSE();

  unsigned AS = getLoadStoreAddressSpace(I);
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

  // Figure out whether the access is strided, and get the stride value
  // if it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
5842 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5843 Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 5844 Alignment, AS, 5845 TTI::TCK_RecipThroughput); 5846 5847 // Get the overhead of the extractelement and insertelement instructions 5848 // we might create due to scalarization. 5849 Cost += getScalarizationOverhead(I, VF); 5850 5851 // If we have a predicated store, it may not be executed for each vector 5852 // lane. Scale the cost by the probability of executing the predicated 5853 // block. 5854 if (isPredicatedInst(I)) { 5855 Cost /= getReciprocalPredBlockProb(); 5856 5857 if (useEmulatedMaskMemRefHack(I)) 5858 // Artificially setting to a high enough value to practically disable 5859 // vectorization with such operations. 5860 Cost = 3000000; 5861 } 5862 5863 return Cost; 5864 } 5865 5866 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5867 unsigned VF) { 5868 Type *ValTy = getMemInstValueType(I); 5869 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5870 Value *Ptr = getLoadStorePointerOperand(I); 5871 unsigned AS = getLoadStoreAddressSpace(I); 5872 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5873 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 5874 5875 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5876 "Stride should be 1 or -1 for consecutive memory access"); 5877 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5878 unsigned Cost = 0; 5879 if (Legal->isMaskRequired(I)) 5880 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, 5881 Alignment ? Alignment->value() : 0, AS, 5882 CostKind); 5883 else 5884 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 5885 CostKind, I); 5886 5887 bool Reverse = ConsecutiveStride < 0; 5888 if (Reverse) 5889 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5890 return Cost; 5891 } 5892 5893 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5894 unsigned VF) { 5895 Type *ValTy = getMemInstValueType(I); 5896 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5897 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5898 unsigned AS = getLoadStoreAddressSpace(I); 5899 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 5900 if (isa<LoadInst>(I)) { 5901 return TTI.getAddressComputationCost(ValTy) + 5902 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 5903 CostKind) + 5904 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5905 } 5906 StoreInst *SI = cast<StoreInst>(I); 5907 5908 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5909 return TTI.getAddressComputationCost(ValTy) + 5910 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 5911 CostKind) + 5912 (isLoopInvariantStoreValue 5913 ? 0 5914 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 5915 VF - 1)); 5916 } 5917 5918 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5919 unsigned VF) { 5920 Type *ValTy = getMemInstValueType(I); 5921 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5922 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5923 Value *Ptr = getLoadStorePointerOperand(I); 5924 5925 return TTI.getAddressComputationCost(VectorTy) + 5926 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5927 Legal->isMaskRequired(I), 5928 Alignment ? 
Alignment->value() : 0, 5929 TargetTransformInfo::TCK_RecipThroughput, 5930 I); 5931 } 5932 5933 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5934 unsigned VF) { 5935 Type *ValTy = getMemInstValueType(I); 5936 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5937 unsigned AS = getLoadStoreAddressSpace(I); 5938 5939 auto Group = getInterleavedAccessGroup(I); 5940 assert(Group && "Fail to get an interleaved access group."); 5941 5942 unsigned InterleaveFactor = Group->getFactor(); 5943 VectorType *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5944 5945 // Holds the indices of existing members in an interleaved load group. 5946 // An interleaved store group doesn't need this as it doesn't allow gaps. 5947 SmallVector<unsigned, 4> Indices; 5948 if (isa<LoadInst>(I)) { 5949 for (unsigned i = 0; i < InterleaveFactor; i++) 5950 if (Group->getMember(i)) 5951 Indices.push_back(i); 5952 } 5953 5954 // Calculate the cost of the whole interleaved group. 5955 bool UseMaskForGaps = 5956 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5957 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5958 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5959 Group->getAlign().value(), AS, TTI::TCK_RecipThroughput, 5960 Legal->isMaskRequired(I), UseMaskForGaps); 5961 5962 if (Group->isReverse()) { 5963 // TODO: Add support for reversed masked interleaved access. 5964 assert(!Legal->isMaskRequired(I) && 5965 "Reverse masked interleaved access not supported."); 5966 Cost += Group->getNumMembers() * 5967 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5968 } 5969 return Cost; 5970 } 5971 5972 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5973 unsigned VF) { 5974 // Calculate scalar cost only. Vectorization cost should be ready at this 5975 // moment. 5976 if (VF == 1) { 5977 Type *ValTy = getMemInstValueType(I); 5978 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5979 unsigned AS = getLoadStoreAddressSpace(I); 5980 5981 return TTI.getAddressComputationCost(ValTy) + 5982 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 5983 TTI::TCK_RecipThroughput, I); 5984 } 5985 return getWideningCost(I, VF); 5986 } 5987 5988 LoopVectorizationCostModel::VectorizationCostTy 5989 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5990 // If we know that this instruction will remain uniform, check the cost of 5991 // the scalar version. 5992 if (isUniformAfterVectorization(I, VF)) 5993 VF = 1; 5994 5995 if (VF > 1 && isProfitableToScalarize(I, VF)) 5996 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5997 5998 // Forced scalars do not have any scalarization overhead. 
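// Their cost is simply VF copies of the scalar cost: e.g. (hypothetically) a
// forced-scalar instruction with scalar cost 1 at VF = 4 is charged 4, with
// no insert/extract overhead added on top.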
5999 auto ForcedScalar = ForcedScalars.find(VF); 6000 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 6001 auto InstSet = ForcedScalar->second; 6002 if (InstSet.find(I) != InstSet.end()) 6003 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 6004 } 6005 6006 Type *VectorTy; 6007 unsigned C = getInstructionCost(I, VF, VectorTy); 6008 6009 bool TypeNotScalarized = 6010 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 6011 return VectorizationCostTy(C, TypeNotScalarized); 6012 } 6013 6014 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6015 unsigned VF) { 6016 6017 if (VF == 1) 6018 return 0; 6019 6020 unsigned Cost = 0; 6021 Type *RetTy = ToVectorTy(I->getType(), VF); 6022 if (!RetTy->isVoidTy() && 6023 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6024 Cost += TTI.getScalarizationOverhead( 6025 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF), true, false); 6026 6027 // Some targets keep addresses scalar. 6028 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6029 return Cost; 6030 6031 // Some targets support efficient element stores. 6032 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6033 return Cost; 6034 6035 // Collect operands to consider. 6036 CallInst *CI = dyn_cast<CallInst>(I); 6037 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 6038 6039 // Skip operands that do not require extraction/scalarization and do not incur 6040 // any overhead. 6041 return Cost + TTI.getOperandsScalarizationOverhead( 6042 filterExtractingOperands(Ops, VF), VF); 6043 } 6044 6045 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 6046 if (VF == 1) 6047 return; 6048 NumPredStores = 0; 6049 for (BasicBlock *BB : TheLoop->blocks()) { 6050 // For each instruction in the old loop. 6051 for (Instruction &I : *BB) { 6052 Value *Ptr = getLoadStorePointerOperand(&I); 6053 if (!Ptr) 6054 continue; 6055 6056 // TODO: We should generate better code and update the cost model for 6057 // predicated uniform stores. Today they are treated as any other 6058 // predicated store (see added test cases in 6059 // invariant-store-vectorization.ll). 6060 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 6061 NumPredStores++; 6062 6063 if (Legal->isUniform(Ptr) && 6064 // Conditional loads and stores should be scalarized and predicated. 6065 // isScalarWithPredication cannot be used here since masked 6066 // gather/scatters are not considered scalar with predication. 6067 !Legal->blockNeedsPredication(I.getParent())) { 6068 // TODO: Avoid replicating loads and stores instead of 6069 // relying on instcombine to remove them. 6070 // Load: Scalar load + broadcast 6071 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6072 unsigned Cost = getUniformMemOpCost(&I, VF); 6073 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6074 continue; 6075 } 6076 6077 // We assume that widening is the best solution when possible. 6078 if (memoryInstructionCanBeWidened(&I, VF)) { 6079 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 6080 int ConsecutiveStride = 6081 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 6082 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6083 "Expected consecutive stride."); 6084 InstWidening Decision = 6085 ConsecutiveStride == 1 ? 
          CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      unsigned GatherScatterCost =
          isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : std::numeric_limits<unsigned>::max();

      unsigned ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the better solution for the current VF, write down this
      // decision, and use it during vectorization.
      unsigned Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  for (auto *I : AddrDefs)
    Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
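      //
      // For example, in IR like the following (hypothetical), %p is itself
      // loaded and then feeds an address computation, so the load of %p is
      // kept scalar:
      //   %p = load i64*, i64** %q
      //   %v = load i64, i64* %p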
6177 InstWidening Decision = getWideningDecision(I, VF); 6178 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 6179 // Scalarize a widened load of address. 6180 setWideningDecision(I, VF, CM_Scalarize, 6181 (VF * getMemoryInstructionCost(I, 1))); 6182 else if (auto Group = getInterleavedAccessGroup(I)) { 6183 // Scalarize an interleave group of address loads. 6184 for (unsigned I = 0; I < Group->getFactor(); ++I) { 6185 if (Instruction *Member = Group->getMember(I)) 6186 setWideningDecision(Member, VF, CM_Scalarize, 6187 (VF * getMemoryInstructionCost(Member, 1))); 6188 } 6189 } 6190 } else 6191 // Make sure I gets scalarized and a cost estimate without 6192 // scalarization overhead. 6193 ForcedScalars[VF].insert(I); 6194 } 6195 } 6196 6197 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6198 unsigned VF, 6199 Type *&VectorTy) { 6200 Type *RetTy = I->getType(); 6201 if (canTruncateToMinimalBitwidth(I, VF)) 6202 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6203 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 6204 auto SE = PSE.getSE(); 6205 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6206 6207 // TODO: We need to estimate the cost of intrinsic calls. 6208 switch (I->getOpcode()) { 6209 case Instruction::GetElementPtr: 6210 // We mark this instruction as zero-cost because the cost of GEPs in 6211 // vectorized code depends on whether the corresponding memory instruction 6212 // is scalarized or not. Therefore, we handle GEPs with the memory 6213 // instruction cost. 6214 return 0; 6215 case Instruction::Br: { 6216 // In cases of scalarized and predicated instructions, there will be VF 6217 // predicated blocks in the vectorized loop. Each branch around these 6218 // blocks requires also an extract of its vector compare i1 element. 6219 bool ScalarPredicatedBB = false; 6220 BranchInst *BI = cast<BranchInst>(I); 6221 if (VF > 1 && BI->isConditional() && 6222 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 6223 PredicatedBBsAfterVectorization.end() || 6224 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 6225 PredicatedBBsAfterVectorization.end())) 6226 ScalarPredicatedBB = true; 6227 6228 if (ScalarPredicatedBB) { 6229 // Return cost for branches around scalarized and predicated blocks. 6230 VectorType *Vec_i1Ty = 6231 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 6232 return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF), 6233 false, true) + 6234 (TTI.getCFInstrCost(Instruction::Br) * VF)); 6235 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 6236 // The back-edge branch will remain, as will all scalar branches. 6237 return TTI.getCFInstrCost(Instruction::Br); 6238 else 6239 // This branch will be eliminated by if-conversion. 6240 return 0; 6241 // Note: We currently assume zero cost for an unconditional branch inside 6242 // a predicated block since it will become a fall-through, although we 6243 // may decide in the future to call TTI for all branches. 6244 } 6245 case Instruction::PHI: { 6246 auto *Phi = cast<PHINode>(I); 6247 6248 // First-order recurrences are replaced by vector shuffles inside the loop. 6249 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 
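    // For example (hypothetical IR), a first-order recurrence phi such as
    //   %r = phi i32 [ %init, %preheader ], [ %prev, %latch ]
    // is turned into a vector shuffle inside the loop, and its cost is
    // modelled below as the extraction of a one-element subvector at
    // index VF - 1.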
6250 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6251 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6252 cast<VectorType>(VectorTy), VF - 1, 6253 VectorType::get(RetTy, 1)); 6254 6255 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 6256 // converted into select instructions. We require N - 1 selects per phi 6257 // node, where N is the number of incoming values. 6258 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 6259 return (Phi->getNumIncomingValues() - 1) * 6260 TTI.getCmpSelInstrCost( 6261 Instruction::Select, ToVectorTy(Phi->getType(), VF), 6262 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 6263 CostKind); 6264 6265 return TTI.getCFInstrCost(Instruction::PHI); 6266 } 6267 case Instruction::UDiv: 6268 case Instruction::SDiv: 6269 case Instruction::URem: 6270 case Instruction::SRem: 6271 // If we have a predicated instruction, it may not be executed for each 6272 // vector lane. Get the scalarization cost and scale this amount by the 6273 // probability of executing the predicated block. If the instruction is not 6274 // predicated, we fall through to the next case. 6275 if (VF > 1 && isScalarWithPredication(I)) { 6276 unsigned Cost = 0; 6277 6278 // These instructions have a non-void type, so account for the phi nodes 6279 // that we will create. This cost is likely to be zero. The phi node 6280 // cost, if any, should be scaled by the block probability because it 6281 // models a copy at the end of each predicated block. 6282 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 6283 6284 // The cost of the non-predicated instruction. 6285 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 6286 6287 // The cost of insertelement and extractelement instructions needed for 6288 // scalarization. 6289 Cost += getScalarizationOverhead(I, VF); 6290 6291 // Scale the cost by the probability of executing the predicated blocks. 6292 // This assumes the predicated block for each vector lane is equally 6293 // likely. 6294 return Cost / getReciprocalPredBlockProb(); 6295 } 6296 LLVM_FALLTHROUGH; 6297 case Instruction::Add: 6298 case Instruction::FAdd: 6299 case Instruction::Sub: 6300 case Instruction::FSub: 6301 case Instruction::Mul: 6302 case Instruction::FMul: 6303 case Instruction::FDiv: 6304 case Instruction::FRem: 6305 case Instruction::Shl: 6306 case Instruction::LShr: 6307 case Instruction::AShr: 6308 case Instruction::And: 6309 case Instruction::Or: 6310 case Instruction::Xor: { 6311 // Since we will replace the stride by 1 the multiplication should go away. 6312 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6313 return 0; 6314 // Certain instructions can be cheaper to vectorize if they have a constant 6315 // second vector operand. One example of this are shifts on x86. 6316 Value *Op2 = I->getOperand(1); 6317 TargetTransformInfo::OperandValueProperties Op2VP; 6318 TargetTransformInfo::OperandValueKind Op2VK = 6319 TTI.getOperandInfo(Op2, Op2VP); 6320 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 6321 Op2VK = TargetTransformInfo::OK_UniformValue; 6322 6323 SmallVector<const Value *, 4> Operands(I->operand_values()); 6324 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6325 return N * TTI.getArithmeticInstrCost( 6326 I->getOpcode(), VectorTy, CostKind, 6327 TargetTransformInfo::OK_AnyValue, 6328 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 6329 } 6330 case Instruction::FNeg: { 6331 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6332 return N * TTI.getArithmeticInstrCost( 6333 I->getOpcode(), VectorTy, CostKind, 6334 TargetTransformInfo::OK_AnyValue, 6335 TargetTransformInfo::OK_AnyValue, 6336 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 6337 I->getOperand(0), I); 6338 } 6339 case Instruction::Select: { 6340 SelectInst *SI = cast<SelectInst>(I); 6341 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6342 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6343 Type *CondTy = SI->getCondition()->getType(); 6344 if (!ScalarCond) 6345 CondTy = VectorType::get(CondTy, VF); 6346 6347 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 6348 CostKind, I); 6349 } 6350 case Instruction::ICmp: 6351 case Instruction::FCmp: { 6352 Type *ValTy = I->getOperand(0)->getType(); 6353 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6354 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 6355 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 6356 VectorTy = ToVectorTy(ValTy, VF); 6357 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind, 6358 I); 6359 } 6360 case Instruction::Store: 6361 case Instruction::Load: { 6362 unsigned Width = VF; 6363 if (Width > 1) { 6364 InstWidening Decision = getWideningDecision(I, Width); 6365 assert(Decision != CM_Unknown && 6366 "CM decision should be taken at this point"); 6367 if (Decision == CM_Scalarize) 6368 Width = 1; 6369 } 6370 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 6371 return getMemoryInstructionCost(I, VF); 6372 } 6373 case Instruction::ZExt: 6374 case Instruction::SExt: 6375 case Instruction::FPToUI: 6376 case Instruction::FPToSI: 6377 case Instruction::FPExt: 6378 case Instruction::PtrToInt: 6379 case Instruction::IntToPtr: 6380 case Instruction::SIToFP: 6381 case Instruction::UIToFP: 6382 case Instruction::Trunc: 6383 case Instruction::FPTrunc: 6384 case Instruction::BitCast: { 6385 // We optimize the truncation of induction variables having constant 6386 // integer steps. The cost of these truncations is the same as the scalar 6387 // operation. 6388 if (isOptimizableIVTruncate(I, VF)) { 6389 auto *Trunc = cast<TruncInst>(I); 6390 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 6391 Trunc->getSrcTy(), CostKind, Trunc); 6392 } 6393 6394 Type *SrcScalarTy = I->getOperand(0)->getType(); 6395 Type *SrcVecTy = 6396 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6397 if (canTruncateToMinimalBitwidth(I, VF)) { 6398 // This cast is going to be shrunk. This may remove the cast or it might 6399 // turn it into slightly different cast. For example, if MinBW == 16, 6400 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6401 // 6402 // Calculate the modified src and dest types. 6403 Type *MinVecTy = VectorTy; 6404 if (I->getOpcode() == Instruction::Trunc) { 6405 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6406 VectorTy = 6407 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6408 } else if (I->getOpcode() == Instruction::ZExt || 6409 I->getOpcode() == Instruction::SExt) { 6410 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6411 VectorTy = 6412 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6413 } 6414 } 6415 6416 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6417 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, 6418 CostKind, I); 6419 } 6420 case Instruction::Call: { 6421 bool NeedToScalarize; 6422 CallInst *CI = cast<CallInst>(I); 6423 unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 6424 if (getVectorIntrinsicIDForCall(CI, TLI)) 6425 return std::min(CallCost, getVectorIntrinsicCost(CI, VF)); 6426 return CallCost; 6427 } 6428 default: 6429 // The cost of executing VF copies of the scalar instruction. This opcode 6430 // is unknown. Assume that it is the same as 'mul'. 6431 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, 6432 CostKind) + 6433 getScalarizationOverhead(I, VF); 6434 } // end of switch. 6435 } 6436 6437 char LoopVectorize::ID = 0; 6438 6439 static const char lv_name[] = "Loop Vectorization"; 6440 6441 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6442 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6443 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6444 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6445 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6446 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6447 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6448 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6449 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6450 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6451 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6452 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6453 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6454 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 6455 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 6456 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6457 6458 namespace llvm { 6459 6460 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 6461 6462 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6463 bool VectorizeOnlyWhenForced) { 6464 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6465 } 6466 6467 } // end namespace llvm 6468 6469 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6470 // Check if the pointer operand of a load or store instruction is 6471 // consecutive. 6472 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6473 return Legal->isConsecutivePtr(Ptr); 6474 return false; 6475 } 6476 6477 void LoopVectorizationCostModel::collectValuesToIgnore() { 6478 // Ignore ephemeral values. 6479 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6480 6481 // Ignore type-promoting instructions we identified during reduction 6482 // detection. 6483 for (auto &Reduction : Legal->getReductionVars()) { 6484 RecurrenceDescriptor &RedDes = Reduction.second; 6485 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6486 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6487 } 6488 // Ignore type-casting instructions we identified during induction 6489 // detection. 6490 for (auto &Induction : Legal->getInductionVars()) { 6491 InductionDescriptor &IndDes = Induction.second; 6492 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6493 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6494 } 6495 } 6496 6497 // TODO: we could return a pair of values that specify the max VF and 6498 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 6499 // `buildVPlans(VF, VF)`. 
We cannot do this because at the moment
// VPlan doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) {
  unsigned VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->empty()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (!UserVF) {
      VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector */), CM);
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && VF < 2) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = 4;
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF) && "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF
                      << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) {
  assert(OrigLoop->empty() && "Inner loop expected.");
  Optional<unsigned> MaybeMaxVF = CM.computeMaxVF();
  if (!MaybeMaxVF) // Cases that should not be vectorized or interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
  if (CM.blockNeedsPredication(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all
      // decisions based on them, which includes widening decisions and
      // uniform and scalar values.
      CM.invalidateCostModelingDecisions();
  }

  if (UserVF) {
    LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    CM.selectUserVectorizationFactor(UserVF);
    buildVPlansWithVPRecipes(UserVF, UserVF);
    LLVM_DEBUG(printPlans(dbgs()));
    return {{UserVF, 0}};
  }

  unsigned MaxVF = MaybeMaxVF.getValue();
  assert(MaxVF != 0 && "MaxVF is zero.");

  for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF > 1)
      CM.collectInstsToScalarize(VF);
  }

  buildVPlansWithVPRecipes(1, MaxVF);
  LLVM_DEBUG(printPlans(dbgs()));
  if (MaxVF == 1)
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  return CM.selectVectorizationFactor(MaxVF);
}

void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
  LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF
                    << '\n');
  BestVF = VF;
  BestUF = UF;

  erase_if(VPlans, [VF](const VPlanPtr &Plan) {
    return !Plan->hasVF(VF);
  });
  assert(VPlans.size() == 1 && "Best VF does not have a single VPlan.");
}

void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
                                           DominatorTree *DT) {
  // Perform the actual loop transformation.

  // 1. Create a new empty loop. Unlink the old loop and connect the new one.
  VPCallbackILV CallbackILV(ILV);

  VPTransformState State{BestVF, BestUF, LI,
                         DT, ILV.Builder, ILV.VectorLoopValueMap,
                         &ILV, CallbackILV};
  State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
  State.TripCount = ILV.getOrCreateTripCount(nullptr);
  State.CanonicalIV = ILV.Induction;

  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost model.
  //
  //===------------------------------------------------===//

  // 2. Copy and widen instructions from the old loop into the new loop.
  assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
  VPlans.front()->execute(&State);

  // 3. Fix the vectorized code: take care of header PHIs, live-outs,
  // predication, and updating analyses.
  ILV.fixVectorizedLoop();
}

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {
  BasicBlock *Latch = OrigLoop->getLoopLatch();

  // We create new control-flow for the vectorized loop, so the original
  // condition will be dead after vectorization if it's only used by the
  // branch.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && Cmp->hasOneUse())
    DeadInstructions.insert(Cmp);

  // We create new "steps" for induction variable updates, to which the
  // original induction variables map. An original update instruction will be
  // dead if all its users except the induction variable are dead.
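  // For example (hypothetical IR): the update
  //   %iv.next = add nuw nsw i64 %iv, 1
  // becomes dead when its only users are the phi %iv itself and the
  // already-dead latch compare collected above.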
6659 for (auto &Induction : Legal->getInductionVars()) { 6660 PHINode *Ind = Induction.first; 6661 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6662 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6663 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6664 DeadInstructions.end(); 6665 })) 6666 DeadInstructions.insert(IndUpdate); 6667 6668 // We record as "Dead" also the type-casting instructions we had identified 6669 // during induction analysis. We don't need any handling for them in the 6670 // vectorized loop because we have proven that, under a proper runtime 6671 // test guarding the vectorized loop, the value of the phi, and the casted 6672 // value of the phi, are the same. The last instruction in this casting chain 6673 // will get its scalar/vector/widened def from the scalar/vector/widened def 6674 // of the respective phi node. Any other casts in the induction def-use chain 6675 // have no other uses outside the phi update chain, and will be ignored. 6676 InductionDescriptor &IndDes = Induction.second; 6677 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6678 DeadInstructions.insert(Casts.begin(), Casts.end()); 6679 } 6680 } 6681 6682 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6683 6684 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6685 6686 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6687 Instruction::BinaryOps BinOp) { 6688 // When unrolling and the VF is 1, we only need to add a simple scalar. 6689 Type *Ty = Val->getType(); 6690 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6691 6692 if (Ty->isFloatingPointTy()) { 6693 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6694 6695 // Floating point operations had to be 'fast' to enable the unrolling. 6696 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6697 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6698 } 6699 Constant *C = ConstantInt::get(Ty, StartIdx); 6700 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6701 } 6702 6703 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6704 SmallVector<Metadata *, 4> MDs; 6705 // Reserve first location for self reference to the LoopID metadata node. 6706 MDs.push_back(nullptr); 6707 bool IsUnrollMetadata = false; 6708 MDNode *LoopID = L->getLoopID(); 6709 if (LoopID) { 6710 // First find existing loop unrolling disable metadata. 6711 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6712 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6713 if (MD) { 6714 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6715 IsUnrollMetadata = 6716 S && S->getString().startswith("llvm.loop.unroll.disable"); 6717 } 6718 MDs.push_back(LoopID->getOperand(i)); 6719 } 6720 } 6721 6722 if (!IsUnrollMetadata) { 6723 // Add runtime unroll disable metadata. 6724 LLVMContext &Context = L->getHeader()->getContext(); 6725 SmallVector<Metadata *, 1> DisableOperands; 6726 DisableOperands.push_back( 6727 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6728 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6729 MDs.push_back(DisableNode); 6730 MDNode *NewLoopID = MDNode::get(Context, MDs); 6731 // Set operand 0 to refer to the loop id itself. 
6732 NewLoopID->replaceOperandWith(0, NewLoopID); 6733 L->setLoopID(NewLoopID); 6734 } 6735 } 6736 6737 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6738 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6739 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6740 bool PredicateAtRangeStart = Predicate(Range.Start); 6741 6742 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6743 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6744 Range.End = TmpVF; 6745 break; 6746 } 6747 6748 return PredicateAtRangeStart; 6749 } 6750 6751 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6752 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6753 /// of VF's starting at a given VF and extending it as much as possible. Each 6754 /// vectorization decision can potentially shorten this sub-range during 6755 /// buildVPlan(). 6756 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6757 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6758 VFRange SubRange = {VF, MaxVF + 1}; 6759 VPlans.push_back(buildVPlan(SubRange)); 6760 VF = SubRange.End; 6761 } 6762 } 6763 6764 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6765 VPlanPtr &Plan) { 6766 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6767 6768 // Look for cached value. 6769 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6770 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6771 if (ECEntryIt != EdgeMaskCache.end()) 6772 return ECEntryIt->second; 6773 6774 VPValue *SrcMask = createBlockInMask(Src, Plan); 6775 6776 // The terminator has to be a branch inst! 6777 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6778 assert(BI && "Unexpected terminator found"); 6779 6780 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 6781 return EdgeMaskCache[Edge] = SrcMask; 6782 6783 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6784 assert(EdgeMask && "No Edge Mask found for condition"); 6785 6786 if (BI->getSuccessor(0) != Dst) 6787 EdgeMask = Builder.createNot(EdgeMask); 6788 6789 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6790 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6791 6792 return EdgeMaskCache[Edge] = EdgeMask; 6793 } 6794 6795 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6796 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6797 6798 // Look for cached value. 6799 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6800 if (BCEntryIt != BlockMaskCache.end()) 6801 return BCEntryIt->second; 6802 6803 // All-one mask is modelled as no-mask following the convention for masked 6804 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6805 VPValue *BlockMask = nullptr; 6806 6807 if (OrigLoop->getHeader() == BB) { 6808 if (!CM.blockNeedsPredication(BB)) 6809 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6810 6811 // Introduce the early-exit compare IV <= BTC to form header block mask. 6812 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6813 // Start by constructing the desired canonical IV. 
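    // For example (hypothetical): with a trip count of 10 and VF = 4, BTC is
    // 9, so the third vector iteration compares IV lanes <8, 9, 10, 11>
    // ule 9 and produces the mask <1, 1, 0, 0>, disabling the two
    // out-of-range lanes.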
6814 VPValue *IV = nullptr; 6815 if (Legal->getPrimaryInduction()) 6816 IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6817 else { 6818 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 6819 Builder.getInsertBlock()->appendRecipe(IVRecipe); 6820 IV = IVRecipe->getVPValue(); 6821 } 6822 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6823 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6824 return BlockMaskCache[BB] = BlockMask; 6825 } 6826 6827 // This is the block mask. We OR all incoming edges. 6828 for (auto *Predecessor : predecessors(BB)) { 6829 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6830 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6831 return BlockMaskCache[BB] = EdgeMask; 6832 6833 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6834 BlockMask = EdgeMask; 6835 continue; 6836 } 6837 6838 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6839 } 6840 6841 return BlockMaskCache[BB] = BlockMask; 6842 } 6843 6844 VPWidenMemoryInstructionRecipe * 6845 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6846 VPlanPtr &Plan) { 6847 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 6848 "Must be called with either a load or store"); 6849 6850 auto willWiden = [&](unsigned VF) -> bool { 6851 if (VF == 1) 6852 return false; 6853 LoopVectorizationCostModel::InstWidening Decision = 6854 CM.getWideningDecision(I, VF); 6855 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6856 "CM decision should be taken at this point."); 6857 if (Decision == LoopVectorizationCostModel::CM_Interleave) 6858 return true; 6859 if (CM.isScalarAfterVectorization(I, VF) || 6860 CM.isProfitableToScalarize(I, VF)) 6861 return false; 6862 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6863 }; 6864 6865 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6866 return nullptr; 6867 6868 VPValue *Mask = nullptr; 6869 if (Legal->isMaskRequired(I)) 6870 Mask = createBlockInMask(I->getParent(), Plan); 6871 6872 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 6873 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 6874 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 6875 6876 StoreInst *Store = cast<StoreInst>(I); 6877 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 6878 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 6879 } 6880 6881 VPWidenIntOrFpInductionRecipe * 6882 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const { 6883 // Check if this is an integer or fp induction. If so, build the recipe that 6884 // produces its scalar and vector values. 6885 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 6886 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6887 II.getKind() == InductionDescriptor::IK_FpInduction) 6888 return new VPWidenIntOrFpInductionRecipe(Phi); 6889 6890 return nullptr; 6891 } 6892 6893 VPWidenIntOrFpInductionRecipe * 6894 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, 6895 VFRange &Range) const { 6896 // Optimize the special case where the source is a constant integer 6897 // induction variable. Notice that we can only optimize the 'trunc' case 6898 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6899 // (c) other casts depend on pointer size. 6900 6901 // Determine whether \p K is a truncation based on an induction variable that 6902 // can be optimized. 
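  // For example (hypothetical IR): a cast such as
  //   %t = trunc i64 %iv to i32
  // where %iv is an induction with a constant step can be computed directly
  // as a narrower induction; the range of VFs for which this holds is
  // clamped below via getDecisionAndClampRange.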
  auto isOptimizableIVTruncate =
      [&](Instruction *K) -> std::function<bool(unsigned)> {
    return
        [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
  };

  if (LoopVectorizationPlanner::getDecisionAndClampRange(
          isOptimizableIVTruncate(I), Range))
    return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
                                             I);
  return nullptr;
}

VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) {
  // We know that all PHIs in non-header blocks are converted into selects, so
  // we don't have to worry about the insertion order and we can just use the
  // builder. At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  SmallVector<VPValue *, 2> Operands;
  unsigned NumIncoming = Phi->getNumIncomingValues();
  for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
    assert((EdgeMask || NumIncoming == 1) &&
           "Multiple predecessors with one having a full mask");
    Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In)));
    if (EdgeMask)
      Operands.push_back(EdgeMask);
  }
  return new VPBlendRecipe(Phi, Operands);
}

VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range,
                                                   VPlan &Plan) const {
  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); },
      Range);

  if (IsPredicated)
    return nullptr;

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
             ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
    return nullptr;

  auto willWiden = [&](unsigned VF) -> bool {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag shows whether to use an intrinsic or a plain call for the
    // vectorized version of the instruction, i.e. whether the intrinsic call
    // is more beneficial than a library call.
    bool NeedToScalarize = false;
    unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
    bool UseVectorIntrinsic =
        ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost;
    return UseVectorIntrinsic || !NeedToScalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands()));
}

bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
  assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
         !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // Instruction should be widened, unless it is scalar after vectorization,
  // scalarization is profitable or it is predicated.
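  // For example (values assumed for illustration): if an instruction
  // scalarizes for VF = 2 but widens for VF >= 4, then testing the range
  // {2, 16} via getDecisionAndClampRange clamps it to {2, 4} and returns the
  // VF = 2 decision; VFs 4 and upwards are reconsidered when the next
  // sub-range is built.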
  auto WillScalarize = [this, I](unsigned VF) -> bool {
    return CM.isScalarAfterVectorization(I, VF) ||
           CM.isProfitableToScalarize(I, VF) ||
           CM.isScalarWithPredication(I, VF);
  };
  return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
                                                             Range);
}

VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const {
  auto IsVectorizableOpcode = [](unsigned Opcode) {
    switch (Opcode) {
    case Instruction::Add:
    case Instruction::And:
    case Instruction::AShr:
    case Instruction::BitCast:
    case Instruction::FAdd:
    case Instruction::FCmp:
    case Instruction::FDiv:
    case Instruction::FMul:
    case Instruction::FNeg:
    case Instruction::FPExt:
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::FPTrunc:
    case Instruction::FRem:
    case Instruction::FSub:
    case Instruction::ICmp:
    case Instruction::IntToPtr:
    case Instruction::LShr:
    case Instruction::Mul:
    case Instruction::Or:
    case Instruction::PtrToInt:
    case Instruction::SDiv:
    case Instruction::Select:
    case Instruction::SExt:
    case Instruction::Shl:
    case Instruction::SIToFP:
    case Instruction::SRem:
    case Instruction::Sub:
    case Instruction::Trunc:
    case Instruction::UDiv:
    case Instruction::UIToFP:
    case Instruction::URem:
    case Instruction::Xor:
    case Instruction::ZExt:
      return true;
    }
    return false;
  };

  if (!IsVectorizableOpcode(I->getOpcode()))
    return nullptr;

  // Success: widen this instruction.
  return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands()));
}

VPBasicBlock *VPRecipeBuilder::handleReplication(
    Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
    DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
    VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range);

  auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
  setRecipe(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (auto &Op : I->operands())
    if (auto *PredInst = dyn_cast<Instruction>(Op))
      if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
        PredInst2Recipe[PredInst]->setAlsoPack(false);

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
  assert(VPBB->getSuccessors().empty() &&
         "VPBB has successors when handling predicated replication.");
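  // The replicate region created below is a triangle of blocks whose names
  // (illustrative, derived from the opcode) look like: pred.<opcode>.entry
  // branches on the mask either into pred.<opcode>.if, which holds the
  // replicated instruction, or straight to pred.<opcode>.continue, where an
  // optional phi merges the two paths; see createReplicateRegion below.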
  // Record predicated instructions for above packing optimizations.
  PredInst2Recipe[I] = Recipe;
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe =
      Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
  auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
  VPBlockUtils::connectBlocks(Pred, Exit);

  return Region;
}

VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
                                                      VFRange &Range,
                                                      VPlanPtr &Plan) {
  // First, check for specific widening recipes that deal with calls, memory
  // operations, inductions and Phi nodes.
  if (auto *CI = dyn_cast<CallInst>(Instr))
    return tryToWidenCall(CI, Range, *Plan);

  if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
    return tryToWidenMemory(Instr, Range, Plan);

  VPRecipeBase *Recipe;
  if (auto Phi = dyn_cast<PHINode>(Instr)) {
    if (Phi->getParent() != OrigLoop->getHeader())
      return tryToBlend(Phi, Plan);
    if ((Recipe = tryToOptimizeInductionPHI(Phi)))
      return Recipe;
    return new VPWidenPHIRecipe(Phi);
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
    return Recipe;

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return new VPWidenGEPRecipe(GEP, OrigLoop);

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return new VPWidenSelectRecipe(*SI, InvariantCond);
  }

  return tryToWiden(Instr, *Plan);
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
                                                        unsigned MaxVF) {
  assert(OrigLoop->empty() && "Inner loop expected.");

  // Collect conditions feeding internal conditional branches; they need to be
  // represented in VPlan for it to model masking.
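  // For example (illustrative source, not taken from a particular test): in
  //   for (i = 0; i < n; ++i)
  //     if (a[i] > 5)
  //       b[i] = a[i];
  // the compare feeding the inner branch needs a VPValue definition so that
  // createEdgeMask can refer to it when forming edge and block masks.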
  SmallPtrSet<Value *, 1> NeedDef;

  auto *Latch = OrigLoop->getLoopLatch();
  for (BasicBlock *BB : OrigLoop->blocks()) {
    if (BB == Latch)
      continue;
    BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
    if (Branch && Branch->isConditional())
      NeedDef.insert(Branch->getCondition());
  }

  // If the tail is to be folded by masking, the primary induction variable,
  // if it exists, needs to be represented in VPlan for it to model early-exit
  // masking. Also, both the Phi and the live-out instruction of each reduction
  // are required in order to introduce a select between them in VPlan.
  if (CM.foldTailByMasking()) {
    if (Legal->getPrimaryInduction())
      NeedDef.insert(Legal->getPrimaryInduction());
    for (auto &Reduction : Legal->getReductionVars()) {
      NeedDef.insert(Reduction.first);
      NeedDef.insert(Reduction.second.getLoopExitInstr());
    }
  }

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions. For example, original induction update instructions can
  // become dead because we separately emit induction "steps" when generating
  // code for the new loop. Similarly, we create a new latch condition when
  // setting up the structure of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  for (unsigned VF = MinVF; VF < MaxVF + 1;) {
    VFRange SubRange = {VF, MaxVF + 1};
    VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
                                             DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
    SmallPtrSetImpl<Instruction *> &DeadInstructions,
    const DenseMap<Instruction *, Instruction *> &SinkAfter) {

  // Hold a mapping from predicated instructions to their recipes, in order to
  // fix their AlsoPack behavior if a user is determined to replicate and use a
  // scalar instead of vector value.
  DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
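  // SinkAfter is populated by Legality; typically (e.g. for first-order
  // recurrences) an entry pairs a use of the recurrence phi with the
  // instruction producing the previous value, below which that use must sink.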
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and add
  // placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](unsigned VF) -> bool {
      return (VF >= 2 && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
  auto Plan = std::make_unique<VPlan>();
  VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
  Plan->setEntry(VPBB);

  // Represent values that will have defs inside VPlan.
  for (Value *V : NeedDef)
    Plan->addVPValue(V);

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) ||
          DeadInstructions.find(Instr) != DeadInstructions.end())
        continue;

      if (auto Recipe =
              RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
        RecipeBuilder.setRecipe(Instr, Recipe);
        VPBB->appendRecipe(Recipe);
        continue;
      }

      // Otherwise, if all widening options failed, Instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
          Instr, Range, VPBB, PredInst2Recipe, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }
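
  // The VPlan built so far is, in the common case, a chain of VPBasicBlocks:
  // the dummy pre-entry followed by one block (or more, where predicated
  // replication introduced regions above) per original basic block, in
  // reverse post-order.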

  // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
  // may also be empty, such as the last one, VPBB, reflecting original
  // basic-blocks with no recipes.
  VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
  assert(PreEntry->empty() && "Expecting empty pre-entry block.");
  VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
  VPBlockUtils::disconnectBlocks(PreEntry, Entry);
  delete PreEntry;

  // ---------------------------------------------------------------------------
  // Transform initial VPlan: Apply previously taken decisions, in order, to
  // bring the VPlan to its final state.
  // ---------------------------------------------------------------------------

  // Apply Sink-After legal constraints.
  for (auto &Entry : SinkAfter) {
    VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
    VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
    Sink->moveAfter(Target);
  }

  // Interleave memory: for each Interleave Group we marked earlier as relevant
  // for this VPlan, replace the Recipes widening its memory instructions with a
  // single VPInterleaveRecipe at its insertion point.
  for (auto IG : InterleaveGroups) {
    auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
        RecipeBuilder.getRecipe(IG->getInsertPos()));
    (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask()))
        ->insertBefore(Recipe);

    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *Member = IG->getMember(i)) {
        RecipeBuilder.getRecipe(Member)->eraseFromParent();
      }
  }

  // Finally, if tail is folded by masking, introduce selects between the phi
  // and the live-out instruction of each reduction, at the end of the latch.
  if (CM.foldTailByMasking()) {
    Builder.setInsertPoint(VPBB);
    auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
    for (auto &Reduction : Legal->getReductionVars()) {
      VPValue *Phi = Plan->getVPValue(Reduction.first);
      VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr());
      Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
    }
  }

  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  unsigned VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; VF < Range.End; VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  assert(!OrigLoop->empty());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan.
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG.
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (unsigned VF = Range.Start; VF < Range.End; VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(
      OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions);
  return Plan;
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateVectorValues(
    Value *V, unsigned Part) {
  return ILV.getOrCreateVectorValue(V, Part);
}

Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue(
    Value *V, const VPIteration &Instance) {
  return ILV.getOrCreateScalarValue(V, Instance);
}

void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << " +\n"
    << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  VPValue *Mask = getMask();
  if (Mask) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }
  O << "\\l\"";
  for (unsigned i = 0; i < IG->getFactor(); ++i)
    if (Instruction *I = IG->getMember(i))
      O << " +\n"
        << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\"";
}

void VPWidenCallRecipe::execute(VPTransformState &State) {
  State.ILV->widenCallInstruction(Ingredient, User, State);
}

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.ILV->widenSelectInstruction(Ingredient, InvariantCond);
}

void VPWidenRecipe::execute(VPTransformState &State) {
  State.ILV->widenInstruction(Ingredient, User, State);
}

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  State.ILV->widenGEP(GEP, State.UF, State.VF, IsPtrLoopInvariant,
                      IsIndexLoopInvariant);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, Trunc);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(State.Builder, Phi);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF > 1) {
      // If we're constructing lane 0, initialize to start from undef.
      if (State.Instance->Lane == 0) {
        Value *Undef =
            UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
        State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
      }
      State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF;
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane;

  Value *ConditionBit = nullptr;
  if (!User) // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();
  else {
    VPValue *BlockInMask = User->getOperand(0);
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  }

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst = cast<Instruction>(
      State.ValueMap.getScalarValue(PredInst, *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed.
  // In this case the recipe of the predicated instruction is marked to also
  // do that packing, thereby "hoisting" the insert-element sequence.
  // Otherwise, a phi node for the scalar value is needed.
  unsigned Part = State.Instance->Part;
  if (State.ValueMap.hasVectorValue(PredInst, Part)) {
    Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
  } else {
    Type *PredInstType = PredInst->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
  State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
                                        getMask());
}

// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
// for predication.
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  bool OptSize =
      F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                     PGSOQueryType::IRPass);
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  if (OptSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled)
    return CM_ScalarEpilogueNotAllowedOptSize;

  bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
                              !PreferPredicateOverEpilog;

  // 2) Next, if disabling predication is requested on the command line, honour
  // this and request a scalar epilogue.
  if (PredicateOptDisabled)
    return CM_ScalarEpilogueAllowed;

  // 3) and 4) look if enabling predication is requested on the command line,
  // with a loop hint, or if the TTI hook indicates this is profitable, and if
  // so request predication.
  if (PreferPredicateOverEpilog ||
      Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
      (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
                                        LVL.getLAI()) &&
       Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}
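
// For example, when optimising for size without a 'vectorize(enabled)' hint
// the function above returns CM_ScalarEpilogueNotAllowedOptSize; explicitly
// disabling predication via the PreferPredicateOverEpilog option (and not
// optimising for size) yields CM_ScalarEpilogueAllowed irrespective of loop
// hints or the TTI preference.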

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) {

  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE);

  // Get user vectorization factor.
  const unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  LVP.setBestPlan(VF.Width, 1);

  InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                         &CM);
  LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                    << L->getHeader()->getParent()->getName() << "\"\n");
  LVP.executePlan(LB, DT);

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->empty()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
"enabled" 7706 : "?")) 7707 << " width=" << Hints.getWidth() 7708 << " unroll=" << Hints.getInterleave() << "\n"); 7709 7710 // Function containing loop 7711 Function *F = L->getHeader()->getParent(); 7712 7713 // Looking at the diagnostic output is the only way to determine if a loop 7714 // was vectorized (other than looking at the IR or machine code), so it 7715 // is important to generate an optimization remark for each loop. Most of 7716 // these messages are generated as OptimizationRemarkAnalysis. Remarks 7717 // generated as OptimizationRemark and OptimizationRemarkMissed are 7718 // less verbose reporting vectorized loops and unvectorized loops that may 7719 // benefit from vectorization, respectively. 7720 7721 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 7722 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 7723 return false; 7724 } 7725 7726 PredicatedScalarEvolution PSE(*SE, *L); 7727 7728 // Check if it is legal to vectorize the loop. 7729 LoopVectorizationRequirements Requirements(*ORE); 7730 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 7731 &Requirements, &Hints, DB, AC); 7732 if (!LVL.canVectorize(EnableVPlanNativePath)) { 7733 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 7734 Hints.emitRemarkWithHints(); 7735 return false; 7736 } 7737 7738 // Check the function attributes and profiles to find out if this function 7739 // should be optimized for size. 7740 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 7741 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 7742 7743 // Entrance to the VPlan-native vectorization path. Outer loops are processed 7744 // here. They may require CFG and instruction level transformations before 7745 // even evaluating whether vectorization is profitable. Since we cannot modify 7746 // the incoming IR, we need to build VPlan upfront in the vectorization 7747 // pipeline. 7748 if (!L->empty()) 7749 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 7750 ORE, BFI, PSI, Hints); 7751 7752 assert(L->empty() && "Inner loop expected."); 7753 7754 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 7755 // count by optimizing for size, to minimize overheads. 7756 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 7757 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 7758 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 7759 << "This loop is worth vectorizing only if no scalar " 7760 << "iteration overheads are incurred."); 7761 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 7762 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 7763 else { 7764 LLVM_DEBUG(dbgs() << "\n"); 7765 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 7766 } 7767 } 7768 7769 // Check the function attributes to see if implicit floats are allowed. 7770 // FIXME: This check doesn't seem possibly correct -- what if the loop is 7771 // an integer loop and the vector instructions selected are purely integer 7772 // vector instructions? 7773 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 7774 reportVectorizationFailure( 7775 "Can't vectorize when the NoImplicitFloat attribute is used", 7776 "loop not vectorized due to NoImplicitFloat attribute", 7777 "NoImplicitFloat", ORE, L); 7778 Hints.emitRemarkWithHints(); 7779 return false; 7780 } 7781 7782 // Check if the target supports potentially unsafe FP vectorization. 
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);

  // Get user vectorization factor.
  unsigned UserVF = Hints.getWidth();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;
  unsigned UserIC = Hints.getInterleave();

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                         "requirements.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  if (VF.Width == 1) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  LVP.setBestPlan(VF.Width, IC);

  using namespace ore;
  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not legal to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    LVP.executePlan(Unroller, DT);

    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                L->getHeader())
             << "interleaved loop (interleaved count: "
             << NV("InterleaveCount", IC) << ")";
    });
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LVP.executePlan(LB, DT);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling a scalar loop when there are
    // no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      DisableRuntimeUnroll = true;

    // Report the vectorization decision.
    ORE->emit([&]() {
      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                L->getHeader())
             << "vectorized loop (vectorization width: "
             << NV("VectorizationFactor", VF.Width)
             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
    });
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops, processing each loop nest in the
  // function.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
  ProfileSummaryInfo *PSI =
      MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}