//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopVectorizeFollowupAll =
    "llvm.loop.vectorize.followup_all";
static const char *const LLVMLoopVectorizeFollowupVectorized =
    "llvm.loop.vectorize.followup_vectorized";
static const char *const LLVMLoopVectorizeFollowupEpilogue =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

// Indicates that an epilogue is undesired, predication is preferred.
// This means that the vectorizer will try to fold the loop-tail (epilogue)
// into the loop and predicate the loop body accordingly.
static cl::opt<bool> PreferPredicateOverEpilog(
    "prefer-predicate-over-epilog", cl::init(false), cl::Hidden,
    cl::desc("Indicate that an epilogue is undesired, predication should be "
             "used instead."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
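/// As a purely illustrative example of a "gap": a loop that accesses A[3*i]
/// and A[3*i+2] but never A[3*i+1] forms an interleave-group with factor 3
/// and a gap at member 1; a masked wide load lets us cover the group without
/// touching (and potentially faulting on) the unused gap elements.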
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
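  // For example (illustrative, assuming a typical x86-64 data layout): an
  // x86_fp80 value occupies 80 bits but its allocation size is larger, so
  // consecutive array elements contain padding and the type is irregular
  // even at VF = 1.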
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FastMathFlags::getFast());
  return V;
}

static Value *addFastMathFlag(Value *V, FastMathFlags FMF) {
  if (isa<FPMathOperator>(V))
    cast<Instruction>(V)->setFastMathFlags(FMF);
  return V;
}

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
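///
/// For illustration only (not a guarantee about the exact IR produced): with
/// VF = 4 and UF = 1, a scalar loop such as
///     for (i = 0; i < n; ++i) A[i] = B[i] + 1;
/// is conceptually rewritten so that each vector iteration computes
/// A[i..i+3] = B[i..i+3] + <1, 1, 1, 1> and advances the induction variable
/// by 4, with a scalar epilogue loop handling the remaining (n % 4)
/// iterations.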
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, bool InvariantCond);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, unsigned UF, unsigned VF,
                bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.)
  /// Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                VPTransformState &State, VPValue *Addr,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Addr, VPValue *StoredValue,
                                  VPValue *BlockInMask);

  /// Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(void);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
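  /// A rough sketch of the shape of the result (names are illustrative, not
  /// the exact IR that is emitted):
  ///   loop:
  ///     %index      = phi i64 [ %Start, %preheader ], [ %index.next, %loop ]
  ///     ...
  ///     %index.next = add i64 %index, %Step
  ///     %done       = icmp eq i64 %index.next, %End
  ///     br i1 %done, label %exit, label %loop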
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(RecurrenceDescriptor &RdxDesc);

  /// The Loop exit block may have single value PHI nodes with some
  /// incoming value. While vectorizing we only handled real values
  /// that were defined inside the loop and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Instruction *EntryVal);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in the
  /// vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned above
  /// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
  /// latter case \p EntryVal is a TruncInst and we must not record anything for
  /// that IV, but it's error-prone to expect callers of this routine to care
  /// about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
                                             const Instruction *EntryVal,
                                             Value *VectorLoopValue,
                                             unsigned Part,
                                             unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AliasAnalysis *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  VectorizerValueMap VectorLoopValueMap;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
    const DILocation *DIL = Inst->getDebugLoc();
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst)) {
      auto NewDIL = DIL->cloneByMultiplyingDuplicationFactor(UF * VF);
      if (NewDIL)
        B.SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B.SetCurrentDebugLocation(DIL);
  } else
    B.SetCurrentDebugLocation(DebugLoc());
}

/// Write a record \p DebugMsg about vectorization failure to the debug
/// output stream. If \p I is passed, it is an instruction that prevents
/// vectorization.
#ifndef NDEBUG
static void debugVectorizationFailure(const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: Not vectorizing: " << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
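///
/// A typical (illustrative) use, mirroring reportVectorizationFailure below:
///   ORE->emit(createLVAnalysis(PassName, RemarkName, TheLoop, I)
///             << "explanation of the failure");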
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace llvm {

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationFailure(DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(createLVAnalysis(Hints.vectorizeAnalysisPassName(),
                             ORETag, TheLoop, I) << OREMsg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
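///
/// As a rough, illustrative example of how the numbers are used: if the scalar
/// loop body costs 8 units per iteration and the VF = 4 body costs 20 units,
/// the per-lane cost of the vector version is 20 / 4 = 5 < 8, so VF = 4 looks
/// profitable (see selectVectorizationFactor for the actual comparison).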
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization and interleaving should be avoided up front.
  Optional<unsigned> computeMaxVF();

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(unsigned VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;

    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.find(I) != Scalars->second.end();
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto UniformsPerVF = Uniforms.find(VF);
    assert(UniformsPerVF != Uniforms.end() &&
           "VF not yet analyzed for uniformity");
    return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return false;

    auto ScalarsPerVF = Scalars.find(VF);
    assert(ScalarsPerVF != Scalars.end() &&
           "Scalar values are not calculated for VF");
    return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.find(I) != MinBWs.end() &&
           !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup<Instruction> *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    /// Broadcast this decision to all instructions inside the group.
    /// But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");

    // Cost model is not run in the VPlan-native path - return conservative
    // result until this changes.
    if (EnableVPlanNativePath)
      return CM_GatherScatter;

    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
           "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.find(VF) != Uniforms.end())
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedStore(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr, MaybeAlign Alignment) {
    return Legal->isConsecutivePtr(Ptr) &&
           TTI.isLegalMaskedLoad(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
    return TTI.isLegalMaskedScatter(DataType, Alignment);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
    return TTI.isLegalMaskedGather(DataType, Alignment);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    MaybeAlign Align = getLoadStoreAlignment(V);
    return (LI && isLegalMaskedGather(Ty, Align)) ||
           (SI && isLegalMaskedScatter(Ty, Align));
  }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  /// If a non-zero VF has been calculated, we check if I will be scalarized
  /// with predication for that VF.
  bool isScalarWithPredication(Instruction *I, unsigned VF = 1);

  // Returns true if \p I is an instruction that will be predicated either
  // through scalar predication or masked load/store or masked gather/scatter.
  // Superset of instructions that return true for isScalarWithPredication.
  bool isPredicatedInst(Instruction *I) {
    if (!blockNeedsPredication(I->getParent()))
      return false;
    // Loads and stores that need some form of masked operation are predicated
    // instructions.
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      return Legal->isMaskRequired(I);
    return isScalarWithPredication(I);
  }

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Returns true if \p I is a memory instruction in an interleaved-group
  /// of memory accesses that can be vectorized with wide vector loads/stores
  /// and shuffles.
  bool interleavedAccessCanBeWidened(Instruction *I, unsigned VF = 1);

  /// Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// Get the interleaved access group that \p Instr belongs to.
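  /// For example (illustrative): in a loop that reads A[2*i] and A[2*i+1],
  /// the two loads form an interleaved group with factor 2; they can be
  /// vectorized as one wide load followed by shufflevectors that de-interleave
  /// the even and odd elements.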
  const InterleaveGroup<Instruction> *
  getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps, and there is nothing preventing us from
  /// creating a scalar epilogue.
  bool requiresScalarEpilogue() const {
    return isScalarEpilogueAllowed() && InterleaveInfo.requiresScalarEpilogue();
  }

  /// Returns true if a scalar epilogue is not allowed due to optsize or a
  /// loop hint annotation.
  bool isScalarEpilogueAllowed() const {
    return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
  }

  /// Returns true if all loop blocks should be masked to fold tail loop.
  bool foldTailByMasking() const { return FoldTailByMasking; }

  bool blockNeedsPredication(BasicBlock *BB) {
    return foldTailByMasking() || Legal->blockNeedsPredication(BB);
  }

  /// Estimate cost of an intrinsic call instruction CI if it were vectorized
  /// with factor VF. Return the cost of the instruction, including
  /// scalarization overhead if it's needed.
  unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF);

  /// Estimate cost of a call instruction CI if it were vectorized with factor
  /// VF. Return the cost of the instruction, including scalarization overhead
  /// if it's needed. The flag NeedToScalarize shows if the call needs to be
  /// scalarized -
  /// i.e. either vector version isn't available, or is too expensive.
  unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);

  /// Invalidates decisions already taken by the cost model.
  void invalidateCostModelingDecisions() {
    WideningDecisions.clear();
    Uniforms.clear();
    Scalars.clear();
  }

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
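  /// Roughly (see the implementation for the exact terms), this sums
  ///   VF * (address computation + scalar load/store cost)
  /// with the scalarization overhead of moving values between vectors and
  /// scalars, and, when the access is predicated, scales the cost down by the
  /// probability of the predicated block executing
  /// (see getReciprocalPredBlockProb()).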
1354 unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);
1355
1356 /// The cost computation for an interleaving group of memory instructions.
1357 unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);
1358
1359 /// The cost computation for a Gather/Scatter instruction.
1360 unsigned getGatherScatterCost(Instruction *I, unsigned VF);
1361
1362 /// The cost computation for widening instruction \p I with consecutive
1363 /// memory access.
1364 unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);
1365
1366 /// The cost calculation for Load/Store instruction \p I with a uniform pointer -
1367 /// Load: scalar load + broadcast.
1368 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1369 /// element)
1370 unsigned getUniformMemOpCost(Instruction *I, unsigned VF);
1371
1372 /// Estimate the overhead of scalarizing an instruction. This is a
1373 /// convenience wrapper for the type-based getScalarizationOverhead API.
1374 unsigned getScalarizationOverhead(Instruction *I, unsigned VF);
1375
1376 /// Returns whether the instruction is a load or store and will be emitted
1377 /// as a vector operation.
1378 bool isConsecutiveLoadOrStore(Instruction *I);
1379
1380 /// Returns true if an artificially high cost for emulated masked memrefs
1381 /// should be used.
1382 bool useEmulatedMaskMemRefHack(Instruction *I);
1383
1384 /// Map of scalar integer values to the smallest bitwidth they can be legally
1385 /// represented as. The vector equivalents of these values should be truncated
1386 /// to this type.
1387 MapVector<Instruction *, uint64_t> MinBWs;
1388
1389 /// A type representing the costs for instructions if they were to be
1390 /// scalarized rather than vectorized. The entries are Instruction-Cost
1391 /// pairs.
1392 using ScalarCostsTy = DenseMap<Instruction *, unsigned>;
1393
1394 /// A set containing all BasicBlocks that are known to be present after
1395 /// vectorization as predicated blocks.
1396 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1397
1398 /// Records whether it is allowed to have the original scalar loop execute at
1399 /// least once. This may be needed as a fallback loop in case runtime
1400 /// aliasing/dependence checks fail, or to handle the tail/remainder
1401 /// iterations when the trip count is unknown or doesn't divide by the VF,
1402 /// or as a peel-loop to handle gaps in interleave-groups.
1403 /// Under optsize and when the trip count is very small we don't allow any
1404 /// iterations to execute in the scalar loop.
1405 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1406
1407 /// All blocks of the loop are to be masked to fold the tail of scalar iterations.
1408 bool FoldTailByMasking = false;
1409
1410 /// A map holding scalar costs for different vectorization factors. The
1411 /// presence of a cost for an instruction in the mapping indicates that the
1412 /// instruction will be scalarized when vectorizing with the associated
1413 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1414 DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;
1415
1416 /// Holds the instructions known to be uniform after vectorization.
1417 /// The data is collected per VF.
1418 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;
1419
1420 /// Holds the instructions known to be scalar after vectorization.
1421 /// The data is collected per VF.
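/// For example, for VF > 1 the pointer operand of a consecutive store is both
/// uniform and scalar (one value per vector iteration), whereas the address
/// computation feeding a scalarized memory access is scalar but not uniform
/// (VF values per vector iteration).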
1422 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars; 1423 1424 /// Holds the instructions (address computations) that are forced to be 1425 /// scalarized. 1426 DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1427 1428 /// Returns the expected difference in cost from scalarizing the expression 1429 /// feeding a predicated instruction \p PredInst. The instructions to 1430 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1431 /// non-negative return value implies the expression will be scalarized. 1432 /// Currently, only single-use chains are considered for scalarization. 1433 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1434 unsigned VF); 1435 1436 /// Collect the instructions that are uniform after vectorization. An 1437 /// instruction is uniform if we represent it with a single scalar value in 1438 /// the vectorized loop corresponding to each vector iteration. Examples of 1439 /// uniform instructions include pointer operands of consecutive or 1440 /// interleaved memory accesses. Note that although uniformity implies an 1441 /// instruction will be scalar, the reverse is not true. In general, a 1442 /// scalarized instruction will be represented by VF scalar values in the 1443 /// vectorized loop, each corresponding to an iteration of the original 1444 /// scalar loop. 1445 void collectLoopUniforms(unsigned VF); 1446 1447 /// Collect the instructions that are scalar after vectorization. An 1448 /// instruction is scalar if it is known to be uniform or will be scalarized 1449 /// during vectorization. Non-uniform scalarized instructions will be 1450 /// represented by VF values in the vectorized loop, each corresponding to an 1451 /// iteration of the original scalar loop. 1452 void collectLoopScalars(unsigned VF); 1453 1454 /// Keeps cost model vectorization decision and cost for instructions. 1455 /// Right now it is used for memory instructions only. 1456 using DecisionList = DenseMap<std::pair<Instruction *, unsigned>, 1457 std::pair<InstWidening, unsigned>>; 1458 1459 DecisionList WideningDecisions; 1460 1461 /// Returns true if \p V is expected to be vectorized and it needs to be 1462 /// extracted. 1463 bool needsExtract(Value *V, unsigned VF) const { 1464 Instruction *I = dyn_cast<Instruction>(V); 1465 if (VF == 1 || !I || !TheLoop->contains(I) || TheLoop->isLoopInvariant(I)) 1466 return false; 1467 1468 // Assume we can vectorize V (and hence we need extraction) if the 1469 // scalars are not computed yet. This can happen, because it is called 1470 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1471 // the scalars are collected. That should be a safe assumption in most 1472 // cases, because we check if the operands have vectorizable types 1473 // beforehand in LoopVectorizationLegality. 1474 return Scalars.find(VF) == Scalars.end() || 1475 !isScalarAfterVectorization(I, VF); 1476 }; 1477 1478 /// Returns a range containing only operands needing to be extracted. 1479 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1480 unsigned VF) { 1481 return SmallVector<Value *, 4>(make_filter_range( 1482 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1483 } 1484 1485 public: 1486 /// The loop that we evaluate. 1487 Loop *TheLoop; 1488 1489 /// Predicated scalar evolution analysis. 1490 PredicatedScalarEvolution &PSE; 1491 1492 /// Loop Info analysis. 1493 LoopInfo *LI; 1494 1495 /// Vectorization legality. 
1496 LoopVectorizationLegality *Legal; 1497 1498 /// Vector target information. 1499 const TargetTransformInfo &TTI; 1500 1501 /// Target Library Info. 1502 const TargetLibraryInfo *TLI; 1503 1504 /// Demanded bits analysis. 1505 DemandedBits *DB; 1506 1507 /// Assumption cache. 1508 AssumptionCache *AC; 1509 1510 /// Interface to emit optimization remarks. 1511 OptimizationRemarkEmitter *ORE; 1512 1513 const Function *TheFunction; 1514 1515 /// Loop Vectorize Hint. 1516 const LoopVectorizeHints *Hints; 1517 1518 /// The interleave access information contains groups of interleaved accesses 1519 /// with the same stride and close to each other. 1520 InterleavedAccessInfo &InterleaveInfo; 1521 1522 /// Values to ignore in the cost model. 1523 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1524 1525 /// Values to ignore in the cost model when VF > 1. 1526 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1527 }; 1528 1529 } // end namespace llvm 1530 1531 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 1532 // vectorization. The loop needs to be annotated with #pragma omp simd 1533 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 1534 // vector length information is not provided, vectorization is not considered 1535 // explicit. Interleave hints are not allowed either. These limitations will be 1536 // relaxed in the future. 1537 // Please, note that we are currently forced to abuse the pragma 'clang 1538 // vectorize' semantics. This pragma provides *auto-vectorization hints* 1539 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 1540 // provides *explicit vectorization hints* (LV can bypass legal checks and 1541 // assume that vectorization is legal). However, both hints are implemented 1542 // using the same metadata (llvm.loop.vectorize, processed by 1543 // LoopVectorizeHints). This will be fixed in the future when the native IR 1544 // representation for pragma 'omp simd' is introduced. 1545 static bool isExplicitVecOuterLoop(Loop *OuterLp, 1546 OptimizationRemarkEmitter *ORE) { 1547 assert(!OuterLp->empty() && "This is not an outer loop"); 1548 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 1549 1550 // Only outer loops with an explicit vectorization hint are supported. 1551 // Unannotated outer loops are ignored. 1552 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 1553 return false; 1554 1555 Function *Fn = OuterLp->getHeader()->getParent(); 1556 if (!Hints.allowVectorization(Fn, OuterLp, 1557 true /*VectorizeOnlyWhenForced*/)) { 1558 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 1559 return false; 1560 } 1561 1562 if (Hints.getInterleave() > 1) { 1563 // TODO: Interleave support is future work. 1564 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 1565 "outer loops.\n"); 1566 Hints.emitRemarkWithHints(); 1567 return false; 1568 } 1569 1570 return true; 1571 } 1572 1573 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 1574 OptimizationRemarkEmitter *ORE, 1575 SmallVectorImpl<Loop *> &V) { 1576 // Collect inner loops and outer loops without irreducible control flow. For 1577 // now, only collect outer loops that have explicit vectorization hints. If we 1578 // are stress testing the VPlan H-CFG construction, we collect the outermost 1579 // loop of every loop nest. 
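// For instance, in a nest 'outer { inner }' with no hints, 'outer' fails the
// check below and the recursion at the bottom collects 'inner'; with an
// explicit vectorize(enable)/vectorize_width(#) hint on 'outer' (and the
// VPlan-native path enabled), 'outer' itself is collected and its inner loops
// are not visited.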
1580 if (L.empty() || VPlanBuildStressTest || 1581 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 1582 LoopBlocksRPO RPOT(&L); 1583 RPOT.perform(LI); 1584 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 1585 V.push_back(&L); 1586 // TODO: Collect inner loops inside marked outer loops in case 1587 // vectorization fails for the outer loop. Do not invoke 1588 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 1589 // already known to be reducible. We can use an inherited attribute for 1590 // that. 1591 return; 1592 } 1593 } 1594 for (Loop *InnerL : L) 1595 collectSupportedLoops(*InnerL, LI, ORE, V); 1596 } 1597 1598 namespace { 1599 1600 /// The LoopVectorize Pass. 1601 struct LoopVectorize : public FunctionPass { 1602 /// Pass identification, replacement for typeid 1603 static char ID; 1604 1605 LoopVectorizePass Impl; 1606 1607 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 1608 bool VectorizeOnlyWhenForced = false) 1609 : FunctionPass(ID), 1610 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 1611 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1612 } 1613 1614 bool runOnFunction(Function &F) override { 1615 if (skipFunction(F)) 1616 return false; 1617 1618 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1619 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1620 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1621 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1622 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1623 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1624 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 1625 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1626 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1627 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1628 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1629 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1630 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 1631 1632 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1633 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1634 1635 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1636 GetLAA, *ORE, PSI).MadeAnyChange; 1637 } 1638 1639 void getAnalysisUsage(AnalysisUsage &AU) const override { 1640 AU.addRequired<AssumptionCacheTracker>(); 1641 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1642 AU.addRequired<DominatorTreeWrapperPass>(); 1643 AU.addRequired<LoopInfoWrapperPass>(); 1644 AU.addRequired<ScalarEvolutionWrapperPass>(); 1645 AU.addRequired<TargetTransformInfoWrapperPass>(); 1646 AU.addRequired<AAResultsWrapperPass>(); 1647 AU.addRequired<LoopAccessLegacyAnalysis>(); 1648 AU.addRequired<DemandedBitsWrapperPass>(); 1649 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1650 AU.addRequired<InjectTLIMappingsLegacy>(); 1651 1652 // We currently do not preserve loopinfo/dominator analyses with outer loop 1653 // vectorization. Until this is addressed, mark these analyses as preserved 1654 // only for non-VPlan-native path. 1655 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 
1656 if (!EnableVPlanNativePath) { 1657 AU.addPreserved<LoopInfoWrapperPass>(); 1658 AU.addPreserved<DominatorTreeWrapperPass>(); 1659 } 1660 1661 AU.addPreserved<BasicAAWrapperPass>(); 1662 AU.addPreserved<GlobalsAAWrapperPass>(); 1663 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 1664 } 1665 }; 1666 1667 } // end anonymous namespace 1668 1669 //===----------------------------------------------------------------------===// 1670 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1671 // LoopVectorizationCostModel and LoopVectorizationPlanner. 1672 //===----------------------------------------------------------------------===// 1673 1674 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1675 // We need to place the broadcast of invariant variables outside the loop, 1676 // but only if it's proven safe to do so. Else, broadcast will be inside 1677 // vector loop body. 1678 Instruction *Instr = dyn_cast<Instruction>(V); 1679 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 1680 (!Instr || 1681 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 1682 // Place the code for broadcasting invariant variables in the new preheader. 1683 IRBuilder<>::InsertPointGuard Guard(Builder); 1684 if (SafeToHoist) 1685 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1686 1687 // Broadcast the scalar into all locations in the vector. 1688 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 1689 1690 return Shuf; 1691 } 1692 1693 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 1694 const InductionDescriptor &II, Value *Step, Instruction *EntryVal) { 1695 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1696 "Expected either an induction phi-node or a truncate of it!"); 1697 Value *Start = II.getStartValue(); 1698 1699 // Construct the initial value of the vector IV in the vector loop preheader 1700 auto CurrIP = Builder.saveIP(); 1701 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 1702 if (isa<TruncInst>(EntryVal)) { 1703 assert(Start->getType()->isIntegerTy() && 1704 "Truncation requires an integer type"); 1705 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 1706 Step = Builder.CreateTrunc(Step, TruncType); 1707 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 1708 } 1709 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 1710 Value *SteppedStart = 1711 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 1712 1713 // We create vector phi nodes for both integer and floating-point induction 1714 // variables. Here, we determine the kind of arithmetic we will perform. 1715 Instruction::BinaryOps AddOp; 1716 Instruction::BinaryOps MulOp; 1717 if (Step->getType()->isIntegerTy()) { 1718 AddOp = Instruction::Add; 1719 MulOp = Instruction::Mul; 1720 } else { 1721 AddOp = II.getInductionOpcode(); 1722 MulOp = Instruction::FMul; 1723 } 1724 1725 // Multiply the vectorization factor by the step using integer or 1726 // floating-point arithmetic as appropriate. 1727 Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF); 1728 Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF)); 1729 1730 // Create a vector splat to use in the induction update. 1731 // 1732 // FIXME: If the step is non-constant, we create the vector splat with 1733 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 1734 // handle a constant vector splat. 1735 Value *SplatVF = 1736 isa<Constant>(Mul) 1737 ? 
ConstantVector::getSplat({VF, false}, cast<Constant>(Mul)) 1738 : Builder.CreateVectorSplat(VF, Mul); 1739 Builder.restoreIP(CurrIP); 1740 1741 // We may need to add the step a number of times, depending on the unroll 1742 // factor. The last of those goes into the PHI. 1743 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 1744 &*LoopVectorBody->getFirstInsertionPt()); 1745 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 1746 Instruction *LastInduction = VecInd; 1747 for (unsigned Part = 0; Part < UF; ++Part) { 1748 VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction); 1749 1750 if (isa<TruncInst>(EntryVal)) 1751 addMetadata(LastInduction, EntryVal); 1752 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part); 1753 1754 LastInduction = cast<Instruction>(addFastMathFlag( 1755 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"))); 1756 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 1757 } 1758 1759 // Move the last step to the end of the latch block. This ensures consistent 1760 // placement of all induction updates. 1761 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 1762 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 1763 auto *ICmp = cast<Instruction>(Br->getCondition()); 1764 LastInduction->moveBefore(ICmp); 1765 LastInduction->setName("vec.ind.next"); 1766 1767 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 1768 VecInd->addIncoming(LastInduction, LoopVectorLatch); 1769 } 1770 1771 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 1772 return Cost->isScalarAfterVectorization(I, VF) || 1773 Cost->isProfitableToScalarize(I, VF); 1774 } 1775 1776 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 1777 if (shouldScalarizeInstruction(IV)) 1778 return true; 1779 auto isScalarInst = [&](User *U) -> bool { 1780 auto *I = cast<Instruction>(U); 1781 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 1782 }; 1783 return llvm::any_of(IV->users(), isScalarInst); 1784 } 1785 1786 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 1787 const InductionDescriptor &ID, const Instruction *EntryVal, 1788 Value *VectorLoopVal, unsigned Part, unsigned Lane) { 1789 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 1790 "Expected either an induction phi-node or a truncate of it!"); 1791 1792 // This induction variable is not the phi from the original loop but the 1793 // newly-created IV based on the proof that casted Phi is equal to the 1794 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 1795 // re-uses the same InductionDescriptor that original IV uses but we don't 1796 // have to do any recording in this case - that is done when original IV is 1797 // processed. 1798 if (isa<TruncInst>(EntryVal)) 1799 return; 1800 1801 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 1802 if (Casts.empty()) 1803 return; 1804 // Only the first Cast instruction in the Casts vector is of interest. 1805 // The rest of the Casts (if exist) have no uses outside the 1806 // induction update chain itself. 
1807 Instruction *CastInst = *Casts.begin(); 1808 if (Lane < UINT_MAX) 1809 VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal); 1810 else 1811 VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal); 1812 } 1813 1814 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) { 1815 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 1816 "Primary induction variable must have an integer type"); 1817 1818 auto II = Legal->getInductionVars().find(IV); 1819 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 1820 1821 auto ID = II->second; 1822 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 1823 1824 // The value from the original loop to which we are mapping the new induction 1825 // variable. 1826 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 1827 1828 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 1829 1830 // Generate code for the induction step. Note that induction steps are 1831 // required to be loop-invariant 1832 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 1833 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 1834 "Induction step should be loop invariant"); 1835 if (PSE.getSE()->isSCEVable(IV->getType())) { 1836 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 1837 return Exp.expandCodeFor(Step, Step->getType(), 1838 LoopVectorPreHeader->getTerminator()); 1839 } 1840 return cast<SCEVUnknown>(Step)->getValue(); 1841 }; 1842 1843 // The scalar value to broadcast. This is derived from the canonical 1844 // induction variable. If a truncation type is given, truncate the canonical 1845 // induction variable and step. Otherwise, derive these values from the 1846 // induction descriptor. 1847 auto CreateScalarIV = [&](Value *&Step) -> Value * { 1848 Value *ScalarIV = Induction; 1849 if (IV != OldInduction) { 1850 ScalarIV = IV->getType()->isIntegerTy() 1851 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 1852 : Builder.CreateCast(Instruction::SIToFP, Induction, 1853 IV->getType()); 1854 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 1855 ScalarIV->setName("offset.idx"); 1856 } 1857 if (Trunc) { 1858 auto *TruncType = cast<IntegerType>(Trunc->getType()); 1859 assert(Step->getType()->isIntegerTy() && 1860 "Truncation requires an integer step"); 1861 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 1862 Step = Builder.CreateTrunc(Step, TruncType); 1863 } 1864 return ScalarIV; 1865 }; 1866 1867 // Create the vector values from the scalar IV, in the absence of creating a 1868 // vector IV. 1869 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 1870 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 1871 for (unsigned Part = 0; Part < UF; ++Part) { 1872 Value *EntryPart = 1873 getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode()); 1874 VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart); 1875 if (Trunc) 1876 addMetadata(EntryPart, Trunc); 1877 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part); 1878 } 1879 }; 1880 1881 // Now do the actual transformations, and start with creating the step value. 1882 Value *Step = CreateStepValue(ID.getStep()); 1883 if (VF <= 1) { 1884 Value *ScalarIV = CreateScalarIV(Step); 1885 CreateSplatIV(ScalarIV, Step); 1886 return; 1887 } 1888 1889 // Determine if we want a scalar version of the induction variable. 
This is 1890 // true if the induction variable itself is not widened, or if it has at 1891 // least one user in the loop that is not widened. 1892 auto NeedsScalarIV = needsScalarInduction(EntryVal); 1893 if (!NeedsScalarIV) { 1894 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1895 return; 1896 } 1897 1898 // Try to create a new independent vector induction variable. If we can't 1899 // create the phi node, we will splat the scalar induction variable in each 1900 // loop iteration. 1901 if (!shouldScalarizeInstruction(EntryVal)) { 1902 createVectorIntOrFpInductionPHI(ID, Step, EntryVal); 1903 Value *ScalarIV = CreateScalarIV(Step); 1904 // Create scalar steps that can be used by instructions we will later 1905 // scalarize. Note that the addition of the scalar steps will not increase 1906 // the number of instructions in the loop in the common case prior to 1907 // InstCombine. We will be trading one vector extract for each scalar step. 1908 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1909 return; 1910 } 1911 1912 // If we haven't yet vectorized the induction variable, splat the scalar 1913 // induction variable, and build the necessary step vectors. 1914 // TODO: Don't do it unless the vectorized IV is really required. 1915 Value *ScalarIV = CreateScalarIV(Step); 1916 CreateSplatIV(ScalarIV, Step); 1917 buildScalarSteps(ScalarIV, Step, EntryVal, ID); 1918 } 1919 1920 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 1921 Instruction::BinaryOps BinOp) { 1922 // Create and check the types. 1923 auto *ValVTy = cast<VectorType>(Val->getType()); 1924 int VLen = ValVTy->getNumElements(); 1925 1926 Type *STy = Val->getType()->getScalarType(); 1927 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 1928 "Induction Step must be an integer or FP"); 1929 assert(Step->getType() == STy && "Step has wrong type"); 1930 1931 SmallVector<Constant *, 8> Indices; 1932 1933 if (STy->isIntegerTy()) { 1934 // Create a vector of consecutive numbers from zero to VF. 1935 for (int i = 0; i < VLen; ++i) 1936 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 1937 1938 // Add the consecutive indices to the vector value. 1939 Constant *Cv = ConstantVector::get(Indices); 1940 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 1941 Step = Builder.CreateVectorSplat(VLen, Step); 1942 assert(Step->getType() == Val->getType() && "Invalid step vec"); 1943 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 1944 // which can be found from the original scalar operations. 1945 Step = Builder.CreateMul(Cv, Step); 1946 return Builder.CreateAdd(Val, Step, "induction"); 1947 } 1948 1949 // Floating point induction. 1950 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 1951 "Binary Opcode should be specified for FP induction"); 1952 // Create a vector of consecutive numbers from zero to VF. 1953 for (int i = 0; i < VLen; ++i) 1954 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 1955 1956 // Add the consecutive indices to the vector value. 1957 Constant *Cv = ConstantVector::get(Indices); 1958 1959 Step = Builder.CreateVectorSplat(VLen, Step); 1960 1961 // Floating point operations had to be 'fast' to enable the induction. 
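// Illustratively, for VF = 4 and StartIdx = 0 the code below computes
//   Cv    = <0.0, 1.0, 2.0, 3.0>
//   MulOp = fmul Cv, splat(Step)
//   BOp   = BinOp Val, MulOp        ; fadd or fsub, per the induction
// so lane i of the result holds Val (+/-) i * Step, with fast-math flags set
// on both operations.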
1962 FastMathFlags Flags; 1963 Flags.setFast(); 1964 1965 Value *MulOp = Builder.CreateFMul(Cv, Step); 1966 if (isa<Instruction>(MulOp)) 1967 // Have to check, MulOp may be a constant 1968 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 1969 1970 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 1971 if (isa<Instruction>(BOp)) 1972 cast<Instruction>(BOp)->setFastMathFlags(Flags); 1973 return BOp; 1974 } 1975 1976 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 1977 Instruction *EntryVal, 1978 const InductionDescriptor &ID) { 1979 // We shouldn't have to build scalar steps if we aren't vectorizing. 1980 assert(VF > 1 && "VF should be greater than one"); 1981 1982 // Get the value type and ensure it and the step have the same integer type. 1983 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 1984 assert(ScalarIVTy == Step->getType() && 1985 "Val and Step should have the same type"); 1986 1987 // We build scalar steps for both integer and floating-point induction 1988 // variables. Here, we determine the kind of arithmetic we will perform. 1989 Instruction::BinaryOps AddOp; 1990 Instruction::BinaryOps MulOp; 1991 if (ScalarIVTy->isIntegerTy()) { 1992 AddOp = Instruction::Add; 1993 MulOp = Instruction::Mul; 1994 } else { 1995 AddOp = ID.getInductionOpcode(); 1996 MulOp = Instruction::FMul; 1997 } 1998 1999 // Determine the number of scalars we need to generate for each unroll 2000 // iteration. If EntryVal is uniform, we only need to generate the first 2001 // lane. Otherwise, we generate all VF values. 2002 unsigned Lanes = 2003 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1 2004 : VF; 2005 // Compute the scalar steps and save the results in VectorLoopValueMap. 2006 for (unsigned Part = 0; Part < UF; ++Part) { 2007 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2008 auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane); 2009 auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step)); 2010 auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul)); 2011 VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add); 2012 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane); 2013 } 2014 } 2015 } 2016 2017 Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) { 2018 assert(V != Induction && "The new induction variable should not be used."); 2019 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2020 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2021 2022 // If we have a stride that is replaced by one, do it here. Defer this for 2023 // the VPlan-native path until we start running Legal checks in that path. 2024 if (!EnableVPlanNativePath && Legal->hasStride(V)) 2025 V = ConstantInt::get(V->getType(), 1); 2026 2027 // If we have a vector mapped to this value, return it. 2028 if (VectorLoopValueMap.hasVectorValue(V, Part)) 2029 return VectorLoopValueMap.getVectorValue(V, Part); 2030 2031 // If the value has not been vectorized, check if it has been scalarized 2032 // instead. If it has been scalarized, and we actually need the value in 2033 // vector form, we will construct the vector values on demand. 2034 if (VectorLoopValueMap.hasAnyScalarValue(V)) { 2035 Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0}); 2036 2037 // If we've scalarized a value, that value should be an instruction. 
2038 auto *I = cast<Instruction>(V); 2039 2040 // If we aren't vectorizing, we can just copy the scalar map values over to 2041 // the vector map. 2042 if (VF == 1) { 2043 VectorLoopValueMap.setVectorValue(V, Part, ScalarValue); 2044 return ScalarValue; 2045 } 2046 2047 // Get the last scalar instruction we generated for V and Part. If the value 2048 // is known to be uniform after vectorization, this corresponds to lane zero 2049 // of the Part unroll iteration. Otherwise, the last instruction is the one 2050 // we created for the last vector lane of the Part unroll iteration. 2051 unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1; 2052 auto *LastInst = cast<Instruction>( 2053 VectorLoopValueMap.getScalarValue(V, {Part, LastLane})); 2054 2055 // Set the insert point after the last scalarized instruction. This ensures 2056 // the insertelement sequence will directly follow the scalar definitions. 2057 auto OldIP = Builder.saveIP(); 2058 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2059 Builder.SetInsertPoint(&*NewIP); 2060 2061 // However, if we are vectorizing, we need to construct the vector values. 2062 // If the value is known to be uniform after vectorization, we can just 2063 // broadcast the scalar value corresponding to lane zero for each unroll 2064 // iteration. Otherwise, we construct the vector values using insertelement 2065 // instructions. Since the resulting vectors are stored in 2066 // VectorLoopValueMap, we will only generate the insertelements once. 2067 Value *VectorValue = nullptr; 2068 if (Cost->isUniformAfterVectorization(I, VF)) { 2069 VectorValue = getBroadcastInstrs(ScalarValue); 2070 VectorLoopValueMap.setVectorValue(V, Part, VectorValue); 2071 } else { 2072 // Initialize packing with insertelements to start from undef. 2073 Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF)); 2074 VectorLoopValueMap.setVectorValue(V, Part, Undef); 2075 for (unsigned Lane = 0; Lane < VF; ++Lane) 2076 packScalarIntoVectorValue(V, {Part, Lane}); 2077 VectorValue = VectorLoopValueMap.getVectorValue(V, Part); 2078 } 2079 Builder.restoreIP(OldIP); 2080 return VectorValue; 2081 } 2082 2083 // If this scalar is unknown, assume that it is a constant or that it is 2084 // loop invariant. Broadcast V and save the value for future uses. 2085 Value *B = getBroadcastInstrs(V); 2086 VectorLoopValueMap.setVectorValue(V, Part, B); 2087 return B; 2088 } 2089 2090 Value * 2091 InnerLoopVectorizer::getOrCreateScalarValue(Value *V, 2092 const VPIteration &Instance) { 2093 // If the value is not an instruction contained in the loop, it should 2094 // already be scalar. 2095 if (OrigLoop->isLoopInvariant(V)) 2096 return V; 2097 2098 assert(Instance.Lane > 0 2099 ? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF) 2100 : true && "Uniform values only have lane zero"); 2101 2102 // If the value from the original loop has not been vectorized, it is 2103 // represented by UF x VF scalar values in the new loop. Return the requested 2104 // scalar value. 2105 if (VectorLoopValueMap.hasScalarValue(V, Instance)) 2106 return VectorLoopValueMap.getScalarValue(V, Instance); 2107 2108 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2109 // for the given unroll part. If this entry is not a vector type (i.e., the 2110 // vectorization factor is one), there is no need to generate an 2111 // extractelement instruction. 
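// For example, with VF = 4 the scalar for {Part = 1, Lane = 2} is produced as
//   %s = extractelement <4 x ty> %v.part1, i32 2
// where %v.part1 stands for the part-1 vector value of V (illustrative names).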
2112 auto *U = getOrCreateVectorValue(V, Instance.Part); 2113 if (!U->getType()->isVectorTy()) { 2114 assert(VF == 1 && "Value not scalarized has non-vector type"); 2115 return U; 2116 } 2117 2118 // Otherwise, the value from the original loop has been vectorized and is 2119 // represented by UF vector values. Extract and return the requested scalar 2120 // value from the appropriate vector lane. 2121 return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane)); 2122 } 2123 2124 void InnerLoopVectorizer::packScalarIntoVectorValue( 2125 Value *V, const VPIteration &Instance) { 2126 assert(V != Induction && "The new induction variable should not be used."); 2127 assert(!V->getType()->isVectorTy() && "Can't pack a vector"); 2128 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2129 2130 Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance); 2131 Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part); 2132 VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst, 2133 Builder.getInt32(Instance.Lane)); 2134 VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue); 2135 } 2136 2137 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2138 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2139 SmallVector<int, 8> ShuffleMask; 2140 for (unsigned i = 0; i < VF; ++i) 2141 ShuffleMask.push_back(VF - i - 1); 2142 2143 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2144 ShuffleMask, "reverse"); 2145 } 2146 2147 // Return whether we allow using masked interleave-groups (for dealing with 2148 // strided loads/stores that reside in predicated blocks, or for dealing 2149 // with gaps). 2150 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2151 // If an override option has been passed in for interleaved accesses, use it. 2152 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2153 return EnableMaskedInterleavedMemAccesses; 2154 2155 return TTI.enableMaskedInterleavedAccessVectorization(); 2156 } 2157 2158 // Try to vectorize the interleave group that \p Instr belongs to. 2159 // 2160 // E.g. Translate following interleaved load group (factor = 3): 2161 // for (i = 0; i < N; i+=3) { 2162 // R = Pic[i]; // Member of index 0 2163 // G = Pic[i+1]; // Member of index 1 2164 // B = Pic[i+2]; // Member of index 2 2165 // ... // do something to R, G, B 2166 // } 2167 // To: 2168 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2169 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2170 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2171 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2172 // 2173 // Or translate following interleaved store group (factor = 3): 2174 // for (i = 0; i < N; i+=3) { 2175 // ... 
do something to R, G, B 2176 // Pic[i] = R; // Member of index 0 2177 // Pic[i+1] = G; // Member of index 1 2178 // Pic[i+2] = B; // Member of index 2 2179 // } 2180 // To: 2181 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2182 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2183 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2184 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2185 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2186 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2187 const InterleaveGroup<Instruction> *Group, VPTransformState &State, 2188 VPValue *Addr, VPValue *BlockInMask) { 2189 Instruction *Instr = Group->getInsertPos(); 2190 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2191 2192 // Prepare for the vector type of the interleaved load/store. 2193 Type *ScalarTy = getMemInstValueType(Instr); 2194 unsigned InterleaveFactor = Group->getFactor(); 2195 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2196 2197 // Prepare for the new pointers. 2198 SmallVector<Value *, 2> AddrParts; 2199 unsigned Index = Group->getIndex(Instr); 2200 2201 // TODO: extend the masked interleaved-group support to reversed access. 2202 assert((!BlockInMask || !Group->isReverse()) && 2203 "Reversed masked interleave-group not supported."); 2204 2205 // If the group is reverse, adjust the index to refer to the last vector lane 2206 // instead of the first. We adjust the index from the first vector lane, 2207 // rather than directly getting the pointer for lane VF - 1, because the 2208 // pointer operand of the interleaved access is supposed to be uniform. For 2209 // uniform instructions, we're only required to generate a value for the 2210 // first vector lane in each unroll iteration. 2211 if (Group->isReverse()) 2212 Index += (VF - 1) * Group->getFactor(); 2213 2214 for (unsigned Part = 0; Part < UF; Part++) { 2215 Value *AddrPart = State.get(Addr, {Part, 0}); 2216 setDebugLocFromInst(Builder, AddrPart); 2217 2218 // Notice current instruction could be any index. Need to adjust the address 2219 // to the member of index 0. 2220 // 2221 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2222 // b = A[i]; // Member of index 0 2223 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2224 // 2225 // E.g. A[i+1] = a; // Member of index 1 2226 // A[i] = b; // Member of index 0 2227 // A[i+2] = c; // Member of index 2 (Current instruction) 2228 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2229 2230 bool InBounds = false; 2231 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2232 InBounds = gep->isInBounds(); 2233 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2234 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2235 2236 // Cast to the vector pointer type. 2237 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2238 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2239 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2240 } 2241 2242 setDebugLocFromInst(Builder, Instr); 2243 Value *UndefVec = UndefValue::get(VecTy); 2244 2245 Value *MaskForGaps = nullptr; 2246 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2247 MaskForGaps = createBitMaskForGaps(Builder, VF, *Group); 2248 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2249 } 2250 2251 // Vectorize the interleaved load group. 
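// For each unroll part this emits one wide load (masked if a block mask or a
// mask for gaps is present) covering all members, and then extracts each
// member with a strided shuffle, e.g. mask <1, 4, 7, 10> for member 1 of a
// factor-3 group at VF = 4, reversing the result if the group is reversed.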
2252 if (isa<LoadInst>(Instr)) { 2253 // For each unroll part, create a wide load for the group. 2254 SmallVector<Value *, 2> NewLoads; 2255 for (unsigned Part = 0; Part < UF; Part++) { 2256 Instruction *NewLoad; 2257 if (BlockInMask || MaskForGaps) { 2258 assert(useMaskedInterleavedAccesses(*TTI) && 2259 "masked interleaved groups are not allowed."); 2260 Value *GroupMask = MaskForGaps; 2261 if (BlockInMask) { 2262 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2263 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2264 Value *ShuffledMask = Builder.CreateShuffleVector( 2265 BlockInMaskPart, Undefs, 2266 createReplicatedMask(InterleaveFactor, VF), "interleaved.mask"); 2267 GroupMask = MaskForGaps 2268 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2269 MaskForGaps) 2270 : ShuffledMask; 2271 } 2272 NewLoad = 2273 Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(), 2274 GroupMask, UndefVec, "wide.masked.vec"); 2275 } 2276 else 2277 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2278 Group->getAlign(), "wide.vec"); 2279 Group->addMetadata(NewLoad); 2280 NewLoads.push_back(NewLoad); 2281 } 2282 2283 // For each member in the group, shuffle out the appropriate data from the 2284 // wide loads. 2285 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2286 Instruction *Member = Group->getMember(I); 2287 2288 // Skip the gaps in the group. 2289 if (!Member) 2290 continue; 2291 2292 auto StrideMask = createStrideMask(I, InterleaveFactor, VF); 2293 for (unsigned Part = 0; Part < UF; Part++) { 2294 Value *StridedVec = Builder.CreateShuffleVector( 2295 NewLoads[Part], UndefVec, StrideMask, "strided.vec"); 2296 2297 // If this member has different type, cast the result type. 2298 if (Member->getType() != ScalarTy) { 2299 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2300 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2301 } 2302 2303 if (Group->isReverse()) 2304 StridedVec = reverseVector(StridedVec); 2305 2306 VectorLoopValueMap.setVectorValue(Member, Part, StridedVec); 2307 } 2308 } 2309 return; 2310 } 2311 2312 // The sub vector type for current instruction. 2313 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2314 2315 // Vectorize the interleaved store group. 2316 for (unsigned Part = 0; Part < UF; Part++) { 2317 // Collect the stored vector from each member. 2318 SmallVector<Value *, 4> StoredVecs; 2319 for (unsigned i = 0; i < InterleaveFactor; i++) { 2320 // Interleaved store group doesn't allow a gap, so each index has a member 2321 Instruction *Member = Group->getMember(i); 2322 assert(Member && "Fail to get a member from an interleaved store group"); 2323 2324 Value *StoredVec = getOrCreateVectorValue( 2325 cast<StoreInst>(Member)->getValueOperand(), Part); 2326 if (Group->isReverse()) 2327 StoredVec = reverseVector(StoredVec); 2328 2329 // If this member has different type, cast it to a unified type. 2330 2331 if (StoredVec->getType() != SubVT) 2332 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2333 2334 StoredVecs.push_back(StoredVec); 2335 } 2336 2337 // Concatenate all vectors into a wide vector. 2338 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2339 2340 // Interleave the elements in the wide vector. 
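// E.g. for VF = 4 and factor 3 the interleave mask is
//   <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
// so consecutive lanes of the result cycle through the members of the
// concatenated wide vector: R0, G0, B0, R1, G1, B1, ...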
2341 Value *IVec = Builder.CreateShuffleVector( 2342 WideVec, UndefVec, createInterleaveMask(VF, InterleaveFactor), 2343 "interleaved.vec"); 2344 2345 Instruction *NewStoreInstr; 2346 if (BlockInMask) { 2347 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2348 auto *Undefs = UndefValue::get(BlockInMaskPart->getType()); 2349 Value *ShuffledMask = Builder.CreateShuffleVector( 2350 BlockInMaskPart, Undefs, createReplicatedMask(InterleaveFactor, VF), 2351 "interleaved.mask"); 2352 NewStoreInstr = Builder.CreateMaskedStore( 2353 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2354 } 2355 else 2356 NewStoreInstr = 2357 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2358 2359 Group->addMetadata(NewStoreInstr); 2360 } 2361 } 2362 2363 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr, 2364 VPTransformState &State, 2365 VPValue *Addr, 2366 VPValue *StoredValue, 2367 VPValue *BlockInMask) { 2368 // Attempt to issue a wide load. 2369 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2370 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2371 2372 assert((LI || SI) && "Invalid Load/Store instruction"); 2373 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2374 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2375 2376 LoopVectorizationCostModel::InstWidening Decision = 2377 Cost->getWideningDecision(Instr, VF); 2378 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2379 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2380 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2381 "CM decision is not to widen the memory instruction"); 2382 2383 Type *ScalarDataTy = getMemInstValueType(Instr); 2384 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2385 // An alignment of 0 means target abi alignment. We need to use the scalar's 2386 // target abi alignment in such a case. 2387 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2388 const Align Alignment = 2389 DL.getValueOrABITypeAlignment(getLoadStoreAlignment(Instr), ScalarDataTy); 2390 2391 // Determine if the pointer operand of the access is either consecutive or 2392 // reverse consecutive. 2393 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2394 bool ConsecutiveStride = 2395 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2396 bool CreateGatherScatter = 2397 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2398 2399 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2400 // gather/scatter. Otherwise Decision should have been to Scalarize. 2401 assert((ConsecutiveStride || CreateGatherScatter) && 2402 "The instruction should be scalarized"); 2403 (void)ConsecutiveStride; 2404 2405 VectorParts BlockInMaskParts(UF); 2406 bool isMaskRequired = BlockInMask; 2407 if (isMaskRequired) 2408 for (unsigned Part = 0; Part < UF; ++Part) 2409 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2410 2411 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2412 // Calculate the pointer for the specific unroll-part. 2413 GetElementPtrInst *PartPtr = nullptr; 2414 2415 bool InBounds = false; 2416 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2417 InBounds = gep->isInBounds(); 2418 2419 if (Reverse) { 2420 // If the address is consecutive but reversed, then the 2421 // wide store needs to start at the last vector element. 
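// Concretely, the two GEPs below compute Ptr - Part * VF - (VF - 1), so this
// part covers the VF elements ending at Ptr - Part * VF, and the value (and
// mask, if any) is reversed to match the descending order in memory.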
2422 PartPtr = cast<GetElementPtrInst>( 2423 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(-Part * VF))); 2424 PartPtr->setIsInBounds(InBounds); 2425 PartPtr = cast<GetElementPtrInst>( 2426 Builder.CreateGEP(ScalarDataTy, PartPtr, Builder.getInt32(1 - VF))); 2427 PartPtr->setIsInBounds(InBounds); 2428 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2429 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2430 } else { 2431 PartPtr = cast<GetElementPtrInst>( 2432 Builder.CreateGEP(ScalarDataTy, Ptr, Builder.getInt32(Part * VF))); 2433 PartPtr->setIsInBounds(InBounds); 2434 } 2435 2436 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2437 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2438 }; 2439 2440 // Handle Stores: 2441 if (SI) { 2442 setDebugLocFromInst(Builder, SI); 2443 2444 for (unsigned Part = 0; Part < UF; ++Part) { 2445 Instruction *NewSI = nullptr; 2446 Value *StoredVal = State.get(StoredValue, Part); 2447 if (CreateGatherScatter) { 2448 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2449 Value *VectorGep = State.get(Addr, Part); 2450 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2451 MaskPart); 2452 } else { 2453 if (Reverse) { 2454 // If we store to reverse consecutive memory locations, then we need 2455 // to reverse the order of elements in the stored value. 2456 StoredVal = reverseVector(StoredVal); 2457 // We don't want to update the value in the map as it might be used in 2458 // another expression. So don't call resetVectorValue(StoredVal). 2459 } 2460 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2461 if (isMaskRequired) 2462 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2463 BlockInMaskParts[Part]); 2464 else 2465 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2466 } 2467 addMetadata(NewSI, SI); 2468 } 2469 return; 2470 } 2471 2472 // Handle loads. 2473 assert(LI && "Must have a load instruction"); 2474 setDebugLocFromInst(Builder, LI); 2475 for (unsigned Part = 0; Part < UF; ++Part) { 2476 Value *NewLI; 2477 if (CreateGatherScatter) { 2478 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2479 Value *VectorGep = State.get(Addr, Part); 2480 NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart, 2481 nullptr, "wide.masked.gather"); 2482 addMetadata(NewLI, LI); 2483 } else { 2484 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0})); 2485 if (isMaskRequired) 2486 NewLI = Builder.CreateMaskedLoad( 2487 VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy), 2488 "wide.masked.load"); 2489 else 2490 NewLI = 2491 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 2492 2493 // Add metadata to the load, but setVectorValue to the reverse shuffle. 2494 addMetadata(NewLI, LI); 2495 if (Reverse) 2496 NewLI = reverseVector(NewLI); 2497 } 2498 VectorLoopValueMap.setVectorValue(Instr, Part, NewLI); 2499 } 2500 } 2501 2502 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2503 const VPIteration &Instance, 2504 bool IfPredicateInstr) { 2505 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2506 2507 setDebugLocFromInst(Builder, Instr); 2508 2509 // Does this instruction return a value ? 
2510 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2511 2512 Instruction *Cloned = Instr->clone(); 2513 if (!IsVoidRetTy) 2514 Cloned->setName(Instr->getName() + ".cloned"); 2515 2516 // Replace the operands of the cloned instructions with their scalar 2517 // equivalents in the new loop. 2518 for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { 2519 auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance); 2520 Cloned->setOperand(op, NewOp); 2521 } 2522 addNewMetadata(Cloned, Instr); 2523 2524 // Place the cloned scalar in the new loop. 2525 Builder.Insert(Cloned); 2526 2527 // Add the cloned scalar to the scalar map entry. 2528 VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned); 2529 2530 // If we just cloned a new assumption, add it the assumption cache. 2531 if (auto *II = dyn_cast<IntrinsicInst>(Cloned)) 2532 if (II->getIntrinsicID() == Intrinsic::assume) 2533 AC->registerAssumption(II); 2534 2535 // End if-block. 2536 if (IfPredicateInstr) 2537 PredicatedInstructions.push_back(Cloned); 2538 } 2539 2540 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 2541 Value *End, Value *Step, 2542 Instruction *DL) { 2543 BasicBlock *Header = L->getHeader(); 2544 BasicBlock *Latch = L->getLoopLatch(); 2545 // As we're just creating this loop, it's possible no latch exists 2546 // yet. If so, use the header as this will be a single block loop. 2547 if (!Latch) 2548 Latch = Header; 2549 2550 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 2551 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 2552 setDebugLocFromInst(Builder, OldInst); 2553 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 2554 2555 Builder.SetInsertPoint(Latch->getTerminator()); 2556 setDebugLocFromInst(Builder, OldInst); 2557 2558 // Create i+1 and fill the PHINode. 2559 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 2560 Induction->addIncoming(Start, L->getLoopPreheader()); 2561 Induction->addIncoming(Next, Latch); 2562 // Create the compare. 2563 Value *ICmp = Builder.CreateICmpEQ(Next, End); 2564 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 2565 2566 // Now we have two terminators. Remove the old one from the block. 2567 Latch->getTerminator()->eraseFromParent(); 2568 2569 return Induction; 2570 } 2571 2572 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 2573 if (TripCount) 2574 return TripCount; 2575 2576 assert(L && "Create Trip Count for null loop."); 2577 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2578 // Find the loop boundaries. 2579 ScalarEvolution *SE = PSE.getSE(); 2580 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2581 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 2582 "Invalid loop count"); 2583 2584 Type *IdxTy = Legal->getWidestInductionType(); 2585 assert(IdxTy && "No type for induction"); 2586 2587 // The exit count might have the type of i64 while the phi is i32. This can 2588 // happen if we have an induction variable that is sign extended before the 2589 // compare. The only way that we get a backedge taken count is that the 2590 // induction variable was signed and as such will not overflow. In such a case 2591 // truncation is legal. 
2592 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2593 IdxTy->getPrimitiveSizeInBits()) 2594 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2595 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2596 2597 // Get the total trip count from the count by adding 1. 2598 const SCEV *ExitCount = SE->getAddExpr( 2599 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2600 2601 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2602 2603 // Expand the trip count and place the new instructions in the preheader. 2604 // Notice that the pre-header does not change, only the loop body. 2605 SCEVExpander Exp(*SE, DL, "induction"); 2606 2607 // Count holds the overall loop count (N). 2608 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2609 L->getLoopPreheader()->getTerminator()); 2610 2611 if (TripCount->getType()->isPointerTy()) 2612 TripCount = 2613 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2614 L->getLoopPreheader()->getTerminator()); 2615 2616 return TripCount; 2617 } 2618 2619 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 2620 if (VectorTripCount) 2621 return VectorTripCount; 2622 2623 Value *TC = getOrCreateTripCount(L); 2624 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 2625 2626 Type *Ty = TC->getType(); 2627 Constant *Step = ConstantInt::get(Ty, VF * UF); 2628 2629 // If the tail is to be folded by masking, round the number of iterations N 2630 // up to a multiple of Step instead of rounding down. This is done by first 2631 // adding Step-1 and then rounding down. Note that it's ok if this addition 2632 // overflows: the vector induction variable will eventually wrap to zero given 2633 // that it starts at zero and its Step is a power of two; the loop will then 2634 // exit, with the last early-exit vector comparison also producing all-true. 2635 if (Cost->foldTailByMasking()) { 2636 assert(isPowerOf2_32(VF * UF) && 2637 "VF*UF must be a power of 2 when folding tail by masking"); 2638 TC = Builder.CreateAdd(TC, ConstantInt::get(Ty, VF * UF - 1), "n.rnd.up"); 2639 } 2640 2641 // Now we need to generate the expression for the part of the loop that the 2642 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2643 // iterations are not required for correctness, or N - Step, otherwise. Step 2644 // is equal to the vectorization factor (number of SIMD elements) times the 2645 // unroll factor (number of SIMD instructions). 2646 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2647 2648 // If there is a non-reversed interleaved group that may speculatively access 2649 // memory out-of-bounds, we need to ensure that there will be at least one 2650 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 2651 // the trip count, we set the remainder to be equal to the step. If the step 2652 // does not evenly divide the trip count, no adjustment is necessary since 2653 // there will already be scalar iterations. Note that the minimum iterations 2654 // check ensures that N >= Step. 
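// As an illustration, with VF = 4 and UF = 2 (Step = 8): N = 20 gives R = 4
// and n.vec = 16; N = 24 with a required scalar epilogue bumps R from 0 up to
// 8, so n.vec = 16 and the epilogue runs 8 iterations; with tail folding,
// N = 20 is first rounded up to 24, so n.vec = 24 and the masked vector loop
// covers all iterations.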
2655 if (VF > 1 && Cost->requiresScalarEpilogue()) { 2656 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2657 R = Builder.CreateSelect(IsZero, Step, R); 2658 } 2659 2660 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2661 2662 return VectorTripCount; 2663 } 2664 2665 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2666 const DataLayout &DL) { 2667 // Verify that V is a vector type with same number of elements as DstVTy. 2668 unsigned VF = DstVTy->getNumElements(); 2669 VectorType *SrcVecTy = cast<VectorType>(V->getType()); 2670 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2671 Type *SrcElemTy = SrcVecTy->getElementType(); 2672 Type *DstElemTy = DstVTy->getElementType(); 2673 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2674 "Vector elements must have same size"); 2675 2676 // Do a direct cast if element types are castable. 2677 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2678 return Builder.CreateBitOrPointerCast(V, DstVTy); 2679 } 2680 // V cannot be directly casted to desired vector type. 2681 // May happen when V is a floating point vector but DstVTy is a vector of 2682 // pointers or vice-versa. Handle this using a two-step bitcast using an 2683 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2684 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2685 "Only one type should be a pointer type"); 2686 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2687 "Only one type should be a floating point type"); 2688 Type *IntTy = 2689 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2690 VectorType *VecIntTy = VectorType::get(IntTy, VF); 2691 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2692 return Builder.CreateBitOrPointerCast(CastVal, DstVTy); 2693 } 2694 2695 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 2696 BasicBlock *Bypass) { 2697 Value *Count = getOrCreateTripCount(L); 2698 // Reuse existing vector loop preheader for TC checks. 2699 // Note that new preheader block is generated for vector loop. 2700 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2701 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2702 2703 // Generate code to check if the loop's trip count is less than VF * UF, or 2704 // equal to it in case a scalar epilogue is required; this implies that the 2705 // vector trip count is zero. This check also covers the case where adding one 2706 // to the backedge-taken count overflowed leading to an incorrect trip count 2707 // of zero. In this case we will also jump to the scalar loop. 2708 auto P = Cost->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE 2709 : ICmpInst::ICMP_ULT; 2710 2711 // If tail is to be folded, vector loop takes care of all iterations. 2712 Value *CheckMinIters = Builder.getFalse(); 2713 if (!Cost->foldTailByMasking()) 2714 CheckMinIters = Builder.CreateICmp( 2715 P, Count, ConstantInt::get(Count->getType(), VF * UF), 2716 "min.iters.check"); 2717 2718 // Create new preheader for vector loop. 2719 LoopVectorPreHeader = 2720 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2721 "vector.ph"); 2722 2723 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2724 DT->getNode(Bypass)->getIDom()) && 2725 "TC check is expected to dominate Bypass"); 2726 2727 // Update dominator for Bypass & LoopExit. 
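// Once the branch below is put in place, TCCheckBlock conditionally jumps
// either to the new vector preheader or straight to the scalar loop (Bypass),
// so it is made the immediate dominator of both Bypass and the loop exit.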
2728 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2729 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2730 2731 ReplaceInstWithInst( 2732 TCCheckBlock->getTerminator(), 2733 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 2734 LoopBypassBlocks.push_back(TCCheckBlock); 2735 } 2736 2737 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 2738 // Reuse existing vector loop preheader for SCEV checks. 2739 // Note that new preheader block is generated for vector loop. 2740 BasicBlock *const SCEVCheckBlock = LoopVectorPreHeader; 2741 2742 // Generate the code to check that the SCEV assumptions that we made. 2743 // We want the new basic block to start at the first instruction in a 2744 // sequence of instructions that form a check. 2745 SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(), 2746 "scev.check"); 2747 Value *SCEVCheck = Exp.expandCodeForPredicate( 2748 &PSE.getUnionPredicate(), SCEVCheckBlock->getTerminator()); 2749 2750 if (auto *C = dyn_cast<ConstantInt>(SCEVCheck)) 2751 if (C->isZero()) 2752 return; 2753 2754 assert(!SCEVCheckBlock->getParent()->hasOptSize() && 2755 "Cannot SCEV check stride or overflow when optimizing for size"); 2756 2757 SCEVCheckBlock->setName("vector.scevcheck"); 2758 // Create new preheader for vector loop. 2759 LoopVectorPreHeader = 2760 SplitBlock(SCEVCheckBlock, SCEVCheckBlock->getTerminator(), DT, LI, 2761 nullptr, "vector.ph"); 2762 2763 // Update dominator only if this is first RT check. 2764 if (LoopBypassBlocks.empty()) { 2765 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 2766 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 2767 } 2768 2769 ReplaceInstWithInst( 2770 SCEVCheckBlock->getTerminator(), 2771 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheck)); 2772 LoopBypassBlocks.push_back(SCEVCheckBlock); 2773 AddedSafetyChecks = true; 2774 } 2775 2776 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) { 2777 // VPlan-native path does not do any analysis for runtime checks currently. 2778 if (EnableVPlanNativePath) 2779 return; 2780 2781 // Reuse existing vector loop preheader for runtime memory checks. 2782 // Note that new preheader block is generated for vector loop. 2783 BasicBlock *const MemCheckBlock = L->getLoopPreheader(); 2784 2785 // Generate the code that checks in runtime if arrays overlap. We put the 2786 // checks into a separate block to make the more common case of few elements 2787 // faster. 2788 Instruction *FirstCheckInst; 2789 Instruction *MemRuntimeCheck; 2790 std::tie(FirstCheckInst, MemRuntimeCheck) = 2791 Legal->getLAI()->addRuntimeChecks(MemCheckBlock->getTerminator()); 2792 if (!MemRuntimeCheck) 2793 return; 2794 2795 if (MemCheckBlock->getParent()->hasOptSize()) { 2796 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 2797 "Cannot emit memory checks when optimizing for size, unless forced " 2798 "to vectorize."); 2799 ORE->emit([&]() { 2800 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 2801 L->getStartLoc(), L->getHeader()) 2802 << "Code-size may be reduced by not forcing " 2803 "vectorization, or by source-code modifications " 2804 "eliminating the need for runtime checks " 2805 "(e.g., adding 'restrict')."; 2806 }); 2807 } 2808 2809 MemCheckBlock->setName("vector.memcheck"); 2810 // Create new preheader for vector loop. 
2811 LoopVectorPreHeader = 2812 SplitBlock(MemCheckBlock, MemCheckBlock->getTerminator(), DT, LI, nullptr, 2813 "vector.ph"); 2814 2815 // Update dominator only if this is first RT check. 2816 if (LoopBypassBlocks.empty()) { 2817 DT->changeImmediateDominator(Bypass, MemCheckBlock); 2818 DT->changeImmediateDominator(LoopExitBlock, MemCheckBlock); 2819 } 2820 2821 ReplaceInstWithInst( 2822 MemCheckBlock->getTerminator(), 2823 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheck)); 2824 LoopBypassBlocks.push_back(MemCheckBlock); 2825 AddedSafetyChecks = true; 2826 2827 // We currently don't use LoopVersioning for the actual loop cloning but we 2828 // still use it to add the noalias metadata. 2829 LVer = std::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 2830 PSE.getSE()); 2831 LVer->prepareNoAliasMetadata(); 2832 } 2833 2834 Value *InnerLoopVectorizer::emitTransformedIndex( 2835 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 2836 const InductionDescriptor &ID) const { 2837 2838 SCEVExpander Exp(*SE, DL, "induction"); 2839 auto Step = ID.getStep(); 2840 auto StartValue = ID.getStartValue(); 2841 assert(Index->getType() == Step->getType() && 2842 "Index type does not match StepValue type"); 2843 2844 // Note: the IR at this point is broken. We cannot use SE to create any new 2845 // SCEV and then expand it, hoping that SCEV's simplification will give us 2846 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2847 // lead to various SCEV crashes. So all we can do is to use builder and rely 2848 // on InstCombine for future simplifications. Here we handle some trivial 2849 // cases only. 2850 auto CreateAdd = [&B](Value *X, Value *Y) { 2851 assert(X->getType() == Y->getType() && "Types don't match!"); 2852 if (auto *CX = dyn_cast<ConstantInt>(X)) 2853 if (CX->isZero()) 2854 return Y; 2855 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2856 if (CY->isZero()) 2857 return X; 2858 return B.CreateAdd(X, Y); 2859 }; 2860 2861 auto CreateMul = [&B](Value *X, Value *Y) { 2862 assert(X->getType() == Y->getType() && "Types don't match!"); 2863 if (auto *CX = dyn_cast<ConstantInt>(X)) 2864 if (CX->isOne()) 2865 return Y; 2866 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2867 if (CY->isOne()) 2868 return X; 2869 return B.CreateMul(X, Y); 2870 }; 2871 2872 switch (ID.getKind()) { 2873 case InductionDescriptor::IK_IntInduction: { 2874 assert(Index->getType() == StartValue->getType() && 2875 "Index type does not match StartValue type"); 2876 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 2877 return B.CreateSub(StartValue, Index); 2878 auto *Offset = CreateMul( 2879 Index, Exp.expandCodeFor(Step, Index->getType(), &*B.GetInsertPoint())); 2880 return CreateAdd(StartValue, Offset); 2881 } 2882 case InductionDescriptor::IK_PtrInduction: { 2883 assert(isa<SCEVConstant>(Step) && 2884 "Expected constant step for pointer induction"); 2885 return B.CreateGEP( 2886 StartValue->getType()->getPointerElementType(), StartValue, 2887 CreateMul(Index, Exp.expandCodeFor(Step, Index->getType(), 2888 &*B.GetInsertPoint()))); 2889 } 2890 case InductionDescriptor::IK_FpInduction: { 2891 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2892 auto InductionBinOp = ID.getInductionBinOp(); 2893 assert(InductionBinOp && 2894 (InductionBinOp->getOpcode() == Instruction::FAdd || 2895 InductionBinOp->getOpcode() == Instruction::FSub) && 2896 "Original bin op should be defined for FP induction"); 2897 2898 Value 
*StepValue = cast<SCEVUnknown>(Step)->getValue(); 2899 2900 // Floating point operations had to be 'fast' to enable the induction. 2901 FastMathFlags Flags; 2902 Flags.setFast(); 2903 2904 Value *MulExp = B.CreateFMul(StepValue, Index); 2905 if (isa<Instruction>(MulExp)) 2906 // We have to check, the MulExp may be a constant. 2907 cast<Instruction>(MulExp)->setFastMathFlags(Flags); 2908 2909 Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2910 "induction"); 2911 if (isa<Instruction>(BOp)) 2912 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2913 2914 return BOp; 2915 } 2916 case InductionDescriptor::IK_NoInduction: 2917 return nullptr; 2918 } 2919 llvm_unreachable("invalid enum"); 2920 } 2921 2922 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 2923 /* 2924 In this function we generate a new loop. The new loop will contain 2925 the vectorized instructions while the old loop will continue to run the 2926 scalar remainder. 2927 2928 [ ] <-- loop iteration number check. 2929 / | 2930 / v 2931 | [ ] <-- vector loop bypass (may consist of multiple blocks). 2932 | / | 2933 | / v 2934 || [ ] <-- vector pre header. 2935 |/ | 2936 | v 2937 | [ ] \ 2938 | [ ]_| <-- vector loop. 2939 | | 2940 | v 2941 | -[ ] <--- middle-block. 2942 | / | 2943 | / v 2944 -|- >[ ] <--- new preheader. 2945 | | 2946 | v 2947 | [ ] \ 2948 | [ ]_| <-- old scalar loop to handle remainder. 2949 \ | 2950 \ v 2951 >[ ] <-- exit block. 2952 ... 2953 */ 2954 2955 MDNode *OrigLoopID = OrigLoop->getLoopID(); 2956 2957 // Some loops have a single integer induction variable, while other loops 2958 // don't. One example is c++ iterators that often have multiple pointer 2959 // induction variables. In the code below we also support a case where we 2960 // don't have a single induction variable. 2961 // 2962 // We try to obtain an induction variable from the original loop as hard 2963 // as possible. However if we don't find one that: 2964 // - is an integer 2965 // - counts from zero, stepping by one 2966 // - is the size of the widest induction variable type 2967 // then we create a new one. 2968 OldInduction = Legal->getPrimaryInduction(); 2969 Type *IdxTy = Legal->getWidestInductionType(); 2970 2971 // Split the single block loop into the two loop structure described above. 2972 LoopScalarBody = OrigLoop->getHeader(); 2973 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 2974 LoopExitBlock = OrigLoop->getExitBlock(); 2975 assert(LoopExitBlock && "Must have an exit block"); 2976 assert(LoopVectorPreHeader && "Invalid loop structure"); 2977 2978 LoopMiddleBlock = 2979 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 2980 LI, nullptr, "middle.block"); 2981 LoopScalarPreHeader = 2982 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 2983 nullptr, "scalar.ph"); 2984 // We intentionally don't let SplitBlock to update LoopInfo since 2985 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 2986 // LoopVectorBody is explicitly added to the correct place few lines later. 2987 LoopVectorBody = 2988 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 2989 nullptr, nullptr, "vector.body"); 2990 2991 // Update dominator for loop exit. 2992 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 2993 2994 // Create and register the new vector loop. 
2995 Loop *Lp = LI->AllocateLoop(); 2996 Loop *ParentLoop = OrigLoop->getParentLoop(); 2997 2998 // Insert the new loop into the loop nest and register the new basic blocks 2999 // before calling any utilities such as SCEV that require valid LoopInfo. 3000 if (ParentLoop) { 3001 ParentLoop->addChildLoop(Lp); 3002 } else { 3003 LI->addTopLevelLoop(Lp); 3004 } 3005 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3006 3007 // Find the loop boundaries. 3008 Value *Count = getOrCreateTripCount(Lp); 3009 3010 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3011 3012 // Now, compare the new count to zero. If it is zero skip the vector loop and 3013 // jump to the scalar loop. This check also covers the case where the 3014 // backedge-taken count is uint##_max: adding one to it will overflow leading 3015 // to an incorrect trip count of zero. In this (rare) case we will also jump 3016 // to the scalar loop. 3017 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3018 3019 // Generate the code to check any assumptions that we've made for SCEV 3020 // expressions. 3021 emitSCEVChecks(Lp, LoopScalarPreHeader); 3022 3023 // Generate the code that checks in runtime if arrays overlap. We put the 3024 // checks into a separate block to make the more common case of few elements 3025 // faster. 3026 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3027 3028 // Generate the induction variable. 3029 // The loop step is equal to the vectorization factor (num of SIMD elements) 3030 // times the unroll factor (num of SIMD instructions). 3031 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3032 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3033 Induction = 3034 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3035 getDebugLocFromInstOrOperands(OldInduction)); 3036 3037 // We are going to resume the execution of the scalar loop. 3038 // Go over all of the induction variables that we found and fix the 3039 // PHIs that are left in the scalar version of the loop. 3040 // The starting values of PHI nodes depend on the counter of the last 3041 // iteration in the vectorized loop. 3042 // If we come from a bypass edge then we need to start from the original 3043 // start value. 3044 3045 // This variable saves the new starting index for the scalar loop. It is used 3046 // to test if there are any tail iterations left once the vector loop has 3047 // completed. 3048 for (auto &InductionEntry : Legal->getInductionVars()) { 3049 PHINode *OrigPhi = InductionEntry.first; 3050 InductionDescriptor II = InductionEntry.second; 3051 3052 // Create phi nodes to merge from the backedge-taken check block. 3053 PHINode *BCResumeVal = 3054 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3055 LoopScalarPreHeader->getTerminator()); 3056 // Copy original phi DL over to the new one. 3057 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3058 Value *&EndValue = IVEndValues[OrigPhi]; 3059 if (OrigPhi == OldInduction) { 3060 // We know what the end value is. 
3061 EndValue = CountRoundDown; 3062 } else { 3063 IRBuilder<> B(Lp->getLoopPreheader()->getTerminator()); 3064 Type *StepType = II.getStep()->getType(); 3065 Instruction::CastOps CastOp = 3066 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3067 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3068 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3069 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3070 EndValue->setName("ind.end"); 3071 } 3072 3073 // The new PHI merges the original incoming value, in case of a bypass, 3074 // or the value at the end of the vectorized loop. 3075 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3076 3077 // Fix the scalar body counter (PHI node). 3078 // The old induction's phi node in the scalar body needs the truncated 3079 // value. 3080 for (BasicBlock *BB : LoopBypassBlocks) 3081 BCResumeVal->addIncoming(II.getStartValue(), BB); 3082 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3083 } 3084 3085 // We need the OrigLoop (scalar loop part) latch terminator to help 3086 // produce correct debug info for the middle block BB instructions. 3087 // The legality check stage guarantees that the loop will have a single 3088 // latch. 3089 assert(isa<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()) && 3090 "Scalar loop latch terminator isn't a branch"); 3091 BranchInst *ScalarLatchBr = 3092 cast<BranchInst>(OrigLoop->getLoopLatch()->getTerminator()); 3093 3094 // Add a check in the middle block to see if we have completed 3095 // all of the iterations in the first vector loop. 3096 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3097 // If tail is to be folded, we know we don't need to run the remainder. 3098 Value *CmpN = Builder.getTrue(); 3099 if (!Cost->foldTailByMasking()) { 3100 CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3101 CountRoundDown, "cmp.n", 3102 LoopMiddleBlock->getTerminator()); 3103 3104 // Here we use the same DebugLoc as the scalar loop latch branch instead 3105 // of the corresponding compare because they may have ended up with 3106 // different line numbers and we want to avoid awkward line stepping while 3107 // debugging. Eg. if the compare has got a line number inside the loop. 3108 cast<Instruction>(CmpN)->setDebugLoc(ScalarLatchBr->getDebugLoc()); 3109 } 3110 3111 BranchInst *BrInst = 3112 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, CmpN); 3113 BrInst->setDebugLoc(ScalarLatchBr->getDebugLoc()); 3114 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3115 3116 // Get ready to start creating new instructions into the vectorized body. 3117 assert(LoopVectorPreHeader == Lp->getLoopPreheader() && 3118 "Inconsistent vector loop preheader"); 3119 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3120 3121 Optional<MDNode *> VectorizedLoopID = 3122 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3123 LLVMLoopVectorizeFollowupVectorized}); 3124 if (VectorizedLoopID.hasValue()) { 3125 Lp->setLoopID(VectorizedLoopID.getValue()); 3126 3127 // Do not setAlreadyVectorized if loop attributes have been defined 3128 // explicitly. 3129 return LoopVectorPreHeader; 3130 } 3131 3132 // Keep all loop hints from the original loop on the vector loop (we'll 3133 // replace the vectorizer-specific hints below). 
3134 if (MDNode *LID = OrigLoop->getLoopID()) 3135 Lp->setLoopID(LID); 3136 3137 LoopVectorizeHints Hints(Lp, true, *ORE); 3138 Hints.setAlreadyVectorized(); 3139 3140 #ifdef EXPENSIVE_CHECKS 3141 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3142 LI->verify(*DT); 3143 #endif 3144 3145 return LoopVectorPreHeader; 3146 } 3147 3148 // Fix up external users of the induction variable. At this point, we are 3149 // in LCSSA form, with all external PHIs that use the IV having one input value, 3150 // coming from the remainder loop. We need those PHIs to also have a correct 3151 // value for the IV when arriving directly from the middle block. 3152 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3153 const InductionDescriptor &II, 3154 Value *CountRoundDown, Value *EndValue, 3155 BasicBlock *MiddleBlock) { 3156 // There are two kinds of external IV usages - those that use the value 3157 // computed in the last iteration (the PHI) and those that use the penultimate 3158 // value (the value that feeds into the phi from the loop latch). 3159 // We allow both, but they, obviously, have different values. 3160 3161 assert(OrigLoop->getExitBlock() && "Expected a single exit block"); 3162 3163 DenseMap<Value *, Value *> MissingVals; 3164 3165 // An external user of the last iteration's value should see the value that 3166 // the remainder loop uses to initialize its own IV. 3167 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3168 for (User *U : PostInc->users()) { 3169 Instruction *UI = cast<Instruction>(U); 3170 if (!OrigLoop->contains(UI)) { 3171 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3172 MissingVals[UI] = EndValue; 3173 } 3174 } 3175 3176 // An external user of the penultimate value need to see EndValue - Step. 3177 // The simplest way to get this is to recompute it from the constituent SCEVs, 3178 // that is Start + (Step * (CRD - 1)). 3179 for (User *U : OrigPhi->users()) { 3180 auto *UI = cast<Instruction>(U); 3181 if (!OrigLoop->contains(UI)) { 3182 const DataLayout &DL = 3183 OrigLoop->getHeader()->getModule()->getDataLayout(); 3184 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3185 3186 IRBuilder<> B(MiddleBlock->getTerminator()); 3187 Value *CountMinusOne = B.CreateSub( 3188 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3189 Value *CMO = 3190 !II.getStep()->getType()->isIntegerTy() 3191 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3192 II.getStep()->getType()) 3193 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3194 CMO->setName("cast.cmo"); 3195 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3196 Escape->setName("ind.escape"); 3197 MissingVals[UI] = Escape; 3198 } 3199 } 3200 3201 for (auto &I : MissingVals) { 3202 PHINode *PHI = cast<PHINode>(I.first); 3203 // One corner case we have to handle is two IVs "chasing" each-other, 3204 // that is %IV2 = phi [...], [ %IV1, %latch ] 3205 // In this case, if IV1 has an external use, we need to avoid adding both 3206 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3207 // don't already have an incoming value for the middle block. 
3208 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3209 PHI->addIncoming(I.second, MiddleBlock); 3210 } 3211 } 3212 3213 namespace { 3214 3215 struct CSEDenseMapInfo { 3216 static bool canHandle(const Instruction *I) { 3217 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3218 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3219 } 3220 3221 static inline Instruction *getEmptyKey() { 3222 return DenseMapInfo<Instruction *>::getEmptyKey(); 3223 } 3224 3225 static inline Instruction *getTombstoneKey() { 3226 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3227 } 3228 3229 static unsigned getHashValue(const Instruction *I) { 3230 assert(canHandle(I) && "Unknown instruction!"); 3231 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3232 I->value_op_end())); 3233 } 3234 3235 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3236 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3237 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3238 return LHS == RHS; 3239 return LHS->isIdenticalTo(RHS); 3240 } 3241 }; 3242 3243 } // end anonymous namespace 3244 3245 ///Perform cse of induction variable instructions. 3246 static void cse(BasicBlock *BB) { 3247 // Perform simple cse. 3248 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3249 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3250 Instruction *In = &*I++; 3251 3252 if (!CSEDenseMapInfo::canHandle(In)) 3253 continue; 3254 3255 // Check if we can replace this instruction with any of the 3256 // visited instructions. 3257 if (Instruction *V = CSEMap.lookup(In)) { 3258 In->replaceAllUsesWith(V); 3259 In->eraseFromParent(); 3260 continue; 3261 } 3262 3263 CSEMap[In] = In; 3264 } 3265 } 3266 3267 unsigned LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, 3268 unsigned VF, 3269 bool &NeedToScalarize) { 3270 Function *F = CI->getCalledFunction(); 3271 Type *ScalarRetTy = CI->getType(); 3272 SmallVector<Type *, 4> Tys, ScalarTys; 3273 for (auto &ArgOp : CI->arg_operands()) 3274 ScalarTys.push_back(ArgOp->getType()); 3275 3276 // Estimate cost of scalarized vector call. The source operands are assumed 3277 // to be vectors, so we need to extract individual elements from there, 3278 // execute VF scalar calls, and then gather the result into the vector return 3279 // value. 3280 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, 3281 TTI::TCK_RecipThroughput); 3282 if (VF == 1) 3283 return ScalarCallCost; 3284 3285 // Compute corresponding vector type for return value and arguments. 3286 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3287 for (Type *ScalarTy : ScalarTys) 3288 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3289 3290 // Compute costs of unpacking argument values for the scalar calls and 3291 // packing the return values to a vector. 3292 unsigned ScalarizationCost = getScalarizationOverhead(CI, VF); 3293 3294 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3295 3296 // If we can't emit a vector call for this function, then the currently found 3297 // cost is the cost we need to return. 3298 NeedToScalarize = true; 3299 VFShape Shape = VFShape::get(*CI, {VF, false}, false /*HasGlobalPred*/); 3300 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3301 3302 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3303 return Cost; 3304 3305 // If the corresponding vector cost is cheaper, return its cost. 
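// For illustration (the numbers are hypothetical costs): with VF = 4, a scalar
// call cost of 10 and a scalarization overhead of 6, the scalarized estimate is
// 4 * 10 + 6 = 46; a vector variant is chosen below only if its call cost is
// lower than that.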
3306 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys, 3307 TTI::TCK_RecipThroughput); 3308 if (VectorCallCost < Cost) { 3309 NeedToScalarize = false; 3310 return VectorCallCost; 3311 } 3312 return Cost; 3313 } 3314 3315 unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3316 unsigned VF) { 3317 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3318 assert(ID && "Expected intrinsic call!"); 3319 3320 FastMathFlags FMF; 3321 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3322 FMF = FPMO->getFastMathFlags(); 3323 3324 SmallVector<Value *, 4> Operands(CI->arg_operands()); 3325 return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF, 3326 TargetTransformInfo::TCK_RecipThroughput, 3327 CI); 3328 } 3329 3330 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3331 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3332 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3333 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3334 } 3335 3336 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3337 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3338 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3339 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3340 } 3341 3342 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3343 // For every instruction `I` in MinBWs, truncate the operands, create a 3344 // truncated version of `I` and reextend its result. InstCombine runs 3345 // later and will remove any ext/trunc pairs. 3346 SmallPtrSet<Value *, 4> Erased; 3347 for (const auto &KV : Cost->getMinimalBitwidths()) { 3348 // If the value wasn't vectorized, we must maintain the original scalar 3349 // type. The absence of the value from VectorLoopValueMap indicates that it 3350 // wasn't vectorized. 3351 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first)) 3352 continue; 3353 for (unsigned Part = 0; Part < UF; ++Part) { 3354 Value *I = getOrCreateVectorValue(KV.first, Part); 3355 if (Erased.find(I) != Erased.end() || I->use_empty() || 3356 !isa<Instruction>(I)) 3357 continue; 3358 Type *OriginalTy = I->getType(); 3359 Type *ScalarTruncatedTy = 3360 IntegerType::get(OriginalTy->getContext(), KV.second); 3361 Type *TruncatedTy = VectorType::get( 3362 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getNumElements()); 3363 if (TruncatedTy == OriginalTy) 3364 continue; 3365 3366 IRBuilder<> B(cast<Instruction>(I)); 3367 auto ShrinkOperand = [&](Value *V) -> Value * { 3368 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3369 if (ZI->getSrcTy() == TruncatedTy) 3370 return ZI->getOperand(0); 3371 return B.CreateZExtOrTrunc(V, TruncatedTy); 3372 }; 3373 3374 // The actual instruction modification depends on the instruction type, 3375 // unfortunately. 3376 Value *NewI = nullptr; 3377 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3378 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3379 ShrinkOperand(BO->getOperand(1))); 3380 3381 // Any wrapping introduced by shrinking this operation shouldn't be 3382 // considered undefined behavior. So, we can't unconditionally copy 3383 // arithmetic wrapping flags to NewI. 
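// For example, an i32 'add nuw' that is shrunk to i16 here may legitimately
// wrap in the narrower type, so the nuw/nsw flags are not propagated to NewI.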
3384 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3385 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3386 NewI = 3387 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3388 ShrinkOperand(CI->getOperand(1))); 3389 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3390 NewI = B.CreateSelect(SI->getCondition(), 3391 ShrinkOperand(SI->getTrueValue()), 3392 ShrinkOperand(SI->getFalseValue())); 3393 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3394 switch (CI->getOpcode()) { 3395 default: 3396 llvm_unreachable("Unhandled cast!"); 3397 case Instruction::Trunc: 3398 NewI = ShrinkOperand(CI->getOperand(0)); 3399 break; 3400 case Instruction::SExt: 3401 NewI = B.CreateSExtOrTrunc( 3402 CI->getOperand(0), 3403 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3404 break; 3405 case Instruction::ZExt: 3406 NewI = B.CreateZExtOrTrunc( 3407 CI->getOperand(0), 3408 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3409 break; 3410 } 3411 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3412 auto Elements0 = 3413 cast<VectorType>(SI->getOperand(0)->getType())->getNumElements(); 3414 auto *O0 = B.CreateZExtOrTrunc( 3415 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3416 auto Elements1 = 3417 cast<VectorType>(SI->getOperand(1)->getType())->getNumElements(); 3418 auto *O1 = B.CreateZExtOrTrunc( 3419 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3420 3421 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3422 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3423 // Don't do anything with the operands, just extend the result. 3424 continue; 3425 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3426 auto Elements = 3427 cast<VectorType>(IE->getOperand(0)->getType())->getNumElements(); 3428 auto *O0 = B.CreateZExtOrTrunc( 3429 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3430 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3431 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3432 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3433 auto Elements = 3434 cast<VectorType>(EE->getOperand(0)->getType())->getNumElements(); 3435 auto *O0 = B.CreateZExtOrTrunc( 3436 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3437 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3438 } else { 3439 // If we don't know what to do, be conservative and don't do anything. 3440 continue; 3441 } 3442 3443 // Lastly, extend the result. 3444 NewI->takeName(cast<Instruction>(I)); 3445 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3446 I->replaceAllUsesWith(Res); 3447 cast<Instruction>(I)->eraseFromParent(); 3448 Erased.insert(I); 3449 VectorLoopValueMap.resetVectorValue(KV.first, Part, Res); 3450 } 3451 } 3452 3453 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3454 for (const auto &KV : Cost->getMinimalBitwidths()) { 3455 // If the value wasn't vectorized, we must maintain the original scalar 3456 // type. The absence of the value from VectorLoopValueMap indicates that it 3457 // wasn't vectorized. 
3458 if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
3459 continue;
3460 for (unsigned Part = 0; Part < UF; ++Part) {
3461 Value *I = getOrCreateVectorValue(KV.first, Part);
3462 ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3463 if (Inst && Inst->use_empty()) {
3464 Value *NewI = Inst->getOperand(0);
3465 Inst->eraseFromParent();
3466 VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
3467 }
3468 }
3469 }
3470 }
3471
3472 void InnerLoopVectorizer::fixVectorizedLoop() {
3473 // Insert truncates and extends for any truncated instructions as hints to
3474 // InstCombine.
3475 if (VF > 1)
3476 truncateToMinimalBitwidths();
3477
3478 // Fix widened non-induction PHIs by setting up the PHI operands.
3479 if (OrigPHIsToFix.size()) {
3480 assert(EnableVPlanNativePath &&
3481 "Unexpected non-induction PHIs for fixup in non VPlan-native path");
3482 fixNonInductionPHIs();
3483 }
3484
3485 // At this point every instruction in the original loop is widened to a
3486 // vector form. Now we need to fix the recurrences in the loop. These PHI
3487 // nodes are currently empty because we did not want to introduce cycles.
3488 // This is the second stage of vectorizing recurrences.
3489 fixCrossIterationPHIs();
3490
3491 // Forget the original basic block.
3492 PSE.getSE()->forgetLoop(OrigLoop);
3493
3494 // Fix-up external users of the induction variables.
3495 for (auto &Entry : Legal->getInductionVars())
3496 fixupIVUsers(Entry.first, Entry.second,
3497 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
3498 IVEndValues[Entry.first], LoopMiddleBlock);
3499
3500 fixLCSSAPHIs();
3501 for (Instruction *PI : PredicatedInstructions)
3502 sinkScalarOperands(&*PI);
3503
3504 // Remove redundant induction instructions.
3505 cse(LoopVectorBody);
3506
3507 // Set/update profile weights for the vector and remainder loops as original
3508 // loop iterations are now distributed among them. Note that the original loop,
3509 // represented by LoopScalarBody, becomes the remainder loop after vectorization.
3510 //
3511 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
3512 // end up getting a slightly roughened result but that should be OK since
3513 // profile is not inherently precise anyway. Note also that a possible bypass of
3514 // vector code caused by legality checks is ignored, optimistically assigning all
3515 // the weight to the vector loop.
3516 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody),
3517 LI->getLoopFor(LoopVectorBody),
3518 LI->getLoopFor(LoopScalarBody), VF * UF);
3519 }
3520
3521 void InnerLoopVectorizer::fixCrossIterationPHIs() {
3522 // In order to support recurrences we need to be able to vectorize Phi nodes.
3523 // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3524 // stage #2: We now need to fix the recurrences by adding incoming edges to
3525 // the currently empty PHI nodes. At this point every instruction in the
3526 // original loop is widened to a vector form so we can use them to construct
3527 // the incoming edges.
3528 for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
3529 // Handle first-order recurrences and reductions that need to be fixed.
3530 if (Legal->isFirstOrderRecurrence(&Phi))
3531 fixFirstOrderRecurrence(&Phi);
3532 else if (Legal->isReductionVariable(&Phi))
3533 fixReduction(&Phi);
3534 }
3535 }
3536
3537 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3538 // This is the second phase of vectorizing first-order recurrences. An
3539 // overview of the transformation is described below. Suppose we have the
3540 // following loop.
3541 //
3542 // for (int i = 0; i < n; ++i)
3543 // b[i] = a[i] - a[i - 1];
3544 //
3545 // There is a first-order recurrence on "a". For this loop, the shorthand
3546 // scalar IR looks like:
3547 //
3548 // scalar.ph:
3549 // s_init = a[-1]
3550 // br scalar.body
3551 //
3552 // scalar.body:
3553 // i = phi [0, scalar.ph], [i+1, scalar.body]
3554 // s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3555 // s2 = a[i]
3556 // b[i] = s2 - s1
3557 // br cond, scalar.body, ...
3558 //
3559 // In this example, s1 is a recurrence because its value depends on the
3560 // previous iteration. In the first phase of vectorization, we created a
3561 // temporary value for s1. We now complete the vectorization and produce the
3562 // shorthand vector IR shown below (for VF = 4, UF = 1).
3563 //
3564 // vector.ph:
3565 // v_init = vector(..., ..., ..., a[-1])
3566 // br vector.body
3567 //
3568 // vector.body
3569 // i = phi [0, vector.ph], [i+4, vector.body]
3570 // v1 = phi [v_init, vector.ph], [v2, vector.body]
3571 // v2 = a[i, i+1, i+2, i+3];
3572 // v3 = vector(v1(3), v2(0, 1, 2))
3573 // b[i, i+1, i+2, i+3] = v2 - v3
3574 // br cond, vector.body, middle.block
3575 //
3576 // middle.block:
3577 // x = v2(3)
3578 // br scalar.ph
3579 //
3580 // scalar.ph:
3581 // s_init = phi [x, middle.block], [a[-1], otherwise]
3582 // br scalar.body
3583 //
3584 // After the vector loop completes execution, we extract the next value of
3585 // the recurrence (x) to use as the initial value in the scalar loop.
3586
3587 // Get the original loop preheader and single loop latch.
3588 auto *Preheader = OrigLoop->getLoopPreheader();
3589 auto *Latch = OrigLoop->getLoopLatch();
3590
3591 // Get the initial and previous values of the scalar recurrence.
3592 auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3593 auto *Previous = Phi->getIncomingValueForBlock(Latch);
3594
3595 // Create a vector from the initial value.
3596 auto *VectorInit = ScalarInit;
3597 if (VF > 1) {
3598 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3599 VectorInit = Builder.CreateInsertElement(
3600 UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
3601 Builder.getInt32(VF - 1), "vector.recur.init");
3602 }
3603
3604 // We constructed a temporary phi node in the first phase of vectorization.
3605 // This phi node will eventually be deleted.
3606 Builder.SetInsertPoint(
3607 cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
3608
3609 // Create a phi node for the new recurrence. The current value will either be
3610 // the initial value inserted into a vector or loop-varying vector value.
3611 auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
3612 VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
3613
3614 // Get the vectorized previous value of the last part UF - 1. It appears last
3615 // among all unrolled iterations, due to the order of their construction.
3616 Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
3617
3618 // Find and set the insertion point after the previous value if it is an
3619 // instruction.
3620 BasicBlock::iterator InsertPt;
3621 // Note that the previous value may have been constant-folded so it is not
3622 // guaranteed to be an instruction in the vector loop.
3623 // FIXME: Loop invariant values do not form recurrences. We should deal with
3624 // them earlier.
3625 if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart)) 3626 InsertPt = LoopVectorBody->getFirstInsertionPt(); 3627 else { 3628 Instruction *PreviousInst = cast<Instruction>(PreviousLastPart); 3629 if (isa<PHINode>(PreviousLastPart)) 3630 // If the previous value is a phi node, we should insert after all the phi 3631 // nodes in the block containing the PHI to avoid breaking basic block 3632 // verification. Note that the basic block may be different to 3633 // LoopVectorBody, in case we predicate the loop. 3634 InsertPt = PreviousInst->getParent()->getFirstInsertionPt(); 3635 else 3636 InsertPt = ++PreviousInst->getIterator(); 3637 } 3638 Builder.SetInsertPoint(&*InsertPt); 3639 3640 // We will construct a vector for the recurrence by combining the values for 3641 // the current and previous iterations. This is the required shuffle mask. 3642 SmallVector<int, 8> ShuffleMask(VF); 3643 ShuffleMask[0] = VF - 1; 3644 for (unsigned I = 1; I < VF; ++I) 3645 ShuffleMask[I] = I + VF - 1; 3646 3647 // The vector from which to take the initial value for the current iteration 3648 // (actual or unrolled). Initially, this is the vector phi node. 3649 Value *Incoming = VecPhi; 3650 3651 // Shuffle the current and previous vector and update the vector parts. 3652 for (unsigned Part = 0; Part < UF; ++Part) { 3653 Value *PreviousPart = getOrCreateVectorValue(Previous, Part); 3654 Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part); 3655 auto *Shuffle = VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart, 3656 ShuffleMask) 3657 : Incoming; 3658 PhiPart->replaceAllUsesWith(Shuffle); 3659 cast<Instruction>(PhiPart)->eraseFromParent(); 3660 VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle); 3661 Incoming = PreviousPart; 3662 } 3663 3664 // Fix the latch value of the new recurrence in the vector loop. 3665 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3666 3667 // Extract the last vector element in the middle block. This will be the 3668 // initial value for the recurrence when jumping to the scalar loop. 3669 auto *ExtractForScalar = Incoming; 3670 if (VF > 1) { 3671 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3672 ExtractForScalar = Builder.CreateExtractElement( 3673 ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract"); 3674 } 3675 // Extract the second last element in the middle block if the 3676 // Phi is used outside the loop. We need to extract the phi itself 3677 // and not the last element (the phi update in the current iteration). This 3678 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3679 // when the scalar loop is not run at all. 3680 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3681 if (VF > 1) 3682 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3683 Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi"); 3684 // When loop is unrolled without vectorizing, initialize 3685 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of 3686 // `Incoming`. This is analogous to the vectorized case above: extracting the 3687 // second last element when VF > 1. 3688 else if (UF > 1) 3689 ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2); 3690 3691 // Fix the initial value of the original recurrence in the scalar loop. 
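// In the shorthand example above, this is the scalar.ph phi for s_init: it
// receives x when control arrives from middle.block and a[-1] (the original
// start value) when arriving from a bypass block.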
3692 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
3693 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
3694 for (auto *BB : predecessors(LoopScalarPreHeader)) {
3695 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
3696 Start->addIncoming(Incoming, BB);
3697 }
3698
3699 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
3700 Phi->setName("scalar.recur");
3701
3702 // Finally, fix users of the recurrence outside the loop. The users will need
3703 // either the last value of the scalar recurrence or the last value of the
3704 // vector recurrence we extracted in the middle block. Since the loop is in
3705 // LCSSA form, we just need to find all the phi nodes for the original scalar
3706 // recurrence in the exit block, and then add an edge for the middle block.
3707 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
3708 if (LCSSAPhi.getIncomingValue(0) == Phi) {
3709 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
3710 }
3711 }
3712 }
3713
3714 void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
3715 Constant *Zero = Builder.getInt32(0);
3716
3717 // Get its reduction variable descriptor.
3718 assert(Legal->isReductionVariable(Phi) &&
3719 "Unable to find the reduction variable");
3720 RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[Phi];
3721
3722 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3723 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3724 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3725 RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3726 RdxDesc.getMinMaxRecurrenceKind();
3727 setDebugLocFromInst(Builder, ReductionStartValue);
3728
3729 // We need to generate a reduction vector from the incoming scalar.
3730 // To do so, we need to generate the 'identity' vector and override
3731 // one of the elements with the incoming scalar reduction. We need
3732 // to do it in the vector-loop preheader.
3733 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
3734
3735 // This is the vector-clone of the value that leaves the loop.
3736 Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
3737
3738 // Find the reduction identity value: zero for addition, or and xor;
3739 // one for multiplication; -1 for And.
3740 Value *Identity;
3741 Value *VectorStart;
3742 if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3743 RK == RecurrenceDescriptor::RK_FloatMinMax) {
3744 // MinMax reductions have the start value as their identity.
3745 if (VF == 1) {
3746 VectorStart = Identity = ReductionStartValue;
3747 } else {
3748 VectorStart = Identity =
3749 Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3750 }
3751 } else {
3752 // Handle other reduction kinds:
3753 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3754 RK, VecTy->getScalarType());
3755 if (VF == 1) {
3756 Identity = Iden;
3757 // This vector is the Identity vector where the first element is the
3758 // incoming scalar reduction.
3759 VectorStart = ReductionStartValue;
3760 } else {
3761 Identity = ConstantVector::getSplat({VF, false}, Iden);
3762
3763 // This vector is the Identity vector where the first element is the
3764 // incoming scalar reduction.
3765 VectorStart =
3766 Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3767 }
3768 }
3769
3770 // Wrap flags are in general invalid after vectorization, clear them.
3771 clearReductionWrapFlags(RdxDesc);
3772
3773 // Fix the vector-loop phi.
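// As an illustrative example, for an integer add reduction with start value %s,
// VF = 4 and UF = 2: part 0 of the phi starts at <%s, 0, 0, 0> (VectorStart)
// while part 1 starts at the identity <0, 0, 0, 0>, so the start value is
// folded into the reduction exactly once.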
3774 3775 // Reductions do not have to start at zero. They can start with 3776 // any loop invariant values. 3777 BasicBlock *Latch = OrigLoop->getLoopLatch(); 3778 Value *LoopVal = Phi->getIncomingValueForBlock(Latch); 3779 3780 for (unsigned Part = 0; Part < UF; ++Part) { 3781 Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part); 3782 Value *Val = getOrCreateVectorValue(LoopVal, Part); 3783 // Make sure to add the reduction start value only to the 3784 // first unroll part. 3785 Value *StartVal = (Part == 0) ? VectorStart : Identity; 3786 cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader); 3787 cast<PHINode>(VecRdxPhi) 3788 ->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 3789 } 3790 3791 // Before each round, move the insertion point right between 3792 // the PHIs and the values we are going to write. 3793 // This allows us to write both PHINodes and the extractelement 3794 // instructions. 3795 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3796 3797 setDebugLocFromInst(Builder, LoopExitInst); 3798 3799 // If tail is folded by masking, the vector value to leave the loop should be 3800 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 3801 // instead of the former. 3802 if (Cost->foldTailByMasking()) { 3803 for (unsigned Part = 0; Part < UF; ++Part) { 3804 Value *VecLoopExitInst = 3805 VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3806 Value *Sel = nullptr; 3807 for (User *U : VecLoopExitInst->users()) { 3808 if (isa<SelectInst>(U)) { 3809 assert(!Sel && "Reduction exit feeding two selects"); 3810 Sel = U; 3811 } else 3812 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3813 } 3814 assert(Sel && "Reduction exit feeds no select"); 3815 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, Sel); 3816 } 3817 } 3818 3819 // If the vector reduction can be performed in a smaller type, we truncate 3820 // then extend the loop exit value to enable InstCombine to evaluate the 3821 // entire expression in the smaller type. 3822 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3823 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3824 Builder.SetInsertPoint( 3825 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 3826 VectorParts RdxParts(UF); 3827 for (unsigned Part = 0; Part < UF; ++Part) { 3828 RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3829 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3830 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3831 : Builder.CreateZExt(Trunc, VecTy); 3832 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 3833 UI != RdxParts[Part]->user_end();) 3834 if (*UI != Trunc) { 3835 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 3836 RdxParts[Part] = Extnd; 3837 } else { 3838 ++UI; 3839 } 3840 } 3841 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3842 for (unsigned Part = 0; Part < UF; ++Part) { 3843 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3844 VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]); 3845 } 3846 } 3847 3848 // Reduce all of the unrolled parts into a single vector. 3849 Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0); 3850 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3851 3852 // The middle block terminator has already been assigned a DebugLoc here (the 3853 // OrigLoop's single latch terminator). 
We want the whole middle block to 3854 // appear to execute on this line because: (a) it is all compiler generated, 3855 // (b) these instructions are always executed after evaluating the latch 3856 // conditional branch, and (c) other passes may add new predecessors which 3857 // terminate on this line. This is the easiest way to ensure we don't 3858 // accidentally cause an extra step back into the loop while debugging. 3859 setDebugLocFromInst(Builder, LoopMiddleBlock->getTerminator()); 3860 for (unsigned Part = 1; Part < UF; ++Part) { 3861 Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part); 3862 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3863 // Floating point operations had to be 'fast' to enable the reduction. 3864 ReducedPartRdx = addFastMathFlag( 3865 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart, 3866 ReducedPartRdx, "bin.rdx"), 3867 RdxDesc.getFastMathFlags()); 3868 else 3869 ReducedPartRdx = createMinMaxOp(Builder, MinMaxKind, ReducedPartRdx, 3870 RdxPart); 3871 } 3872 3873 if (VF > 1) { 3874 bool NoNaN = Legal->hasFunNoNaNAttr(); 3875 ReducedPartRdx = 3876 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN); 3877 // If the reduction can be performed in a smaller type, we need to extend 3878 // the reduction to the wider type before we branch to the original loop. 3879 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3880 ReducedPartRdx = 3881 RdxDesc.isSigned() 3882 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3883 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3884 } 3885 3886 // Create a phi node that merges control-flow from the backedge-taken check 3887 // block and the middle block. 3888 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 3889 LoopScalarPreHeader->getTerminator()); 3890 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 3891 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 3892 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 3893 3894 // Now, we need to fix the users of the reduction variable 3895 // inside and outside of the scalar remainder loop. 3896 // We know that the loop is in LCSSA form. We need to update the 3897 // PHI nodes in the exit blocks. 3898 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3899 // All PHINodes need to have a single entry edge, or two if 3900 // we already fixed them. 3901 assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 3902 3903 // We found a reduction value exit-PHI. Update it with the 3904 // incoming bypass edge. 3905 if (LCSSAPhi.getIncomingValue(0) == LoopExitInst) 3906 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 3907 } // end of the LCSSA phi scan. 3908 3909 // Fix the scalar loop reduction variable with the incoming reduction sum 3910 // from the vector body and from the backedge value. 3911 int IncomingEdgeBlockIdx = 3912 Phi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 3913 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 3914 // Pick the other block. 3915 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 
0 : 1); 3916 Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 3917 Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 3918 } 3919 3920 void InnerLoopVectorizer::clearReductionWrapFlags( 3921 RecurrenceDescriptor &RdxDesc) { 3922 RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind(); 3923 if (RK != RecurrenceDescriptor::RK_IntegerAdd && 3924 RK != RecurrenceDescriptor::RK_IntegerMult) 3925 return; 3926 3927 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 3928 assert(LoopExitInstr && "null loop exit instruction"); 3929 SmallVector<Instruction *, 8> Worklist; 3930 SmallPtrSet<Instruction *, 8> Visited; 3931 Worklist.push_back(LoopExitInstr); 3932 Visited.insert(LoopExitInstr); 3933 3934 while (!Worklist.empty()) { 3935 Instruction *Cur = Worklist.pop_back_val(); 3936 if (isa<OverflowingBinaryOperator>(Cur)) 3937 for (unsigned Part = 0; Part < UF; ++Part) { 3938 Value *V = getOrCreateVectorValue(Cur, Part); 3939 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 3940 } 3941 3942 for (User *U : Cur->users()) { 3943 Instruction *UI = cast<Instruction>(U); 3944 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 3945 Visited.insert(UI).second) 3946 Worklist.push_back(UI); 3947 } 3948 } 3949 } 3950 3951 void InnerLoopVectorizer::fixLCSSAPHIs() { 3952 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 3953 if (LCSSAPhi.getNumIncomingValues() == 1) { 3954 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 3955 // Non-instruction incoming values will have only one value. 3956 unsigned LastLane = 0; 3957 if (isa<Instruction>(IncomingValue)) 3958 LastLane = Cost->isUniformAfterVectorization( 3959 cast<Instruction>(IncomingValue), VF) 3960 ? 0 3961 : VF - 1; 3962 // Can be a loop invariant incoming value or the last scalar value to be 3963 // extracted from the vectorized loop. 3964 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3965 Value *lastIncomingValue = 3966 getOrCreateScalarValue(IncomingValue, { UF - 1, LastLane }); 3967 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 3968 } 3969 } 3970 } 3971 3972 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 3973 // The basic block and loop containing the predicated instruction. 3974 auto *PredBB = PredInst->getParent(); 3975 auto *VectorLoop = LI->getLoopFor(PredBB); 3976 3977 // Initialize a worklist with the operands of the predicated instruction. 3978 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 3979 3980 // Holds instructions that we need to analyze again. An instruction may be 3981 // reanalyzed if we don't yet know if we can sink it or not. 3982 SmallVector<Instruction *, 8> InstsToReanalyze; 3983 3984 // Returns true if a given use occurs in the predicated block. Phi nodes use 3985 // their operands in their corresponding predecessor blocks. 3986 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 3987 auto *I = cast<Instruction>(U.getUser()); 3988 BasicBlock *BB = I->getParent(); 3989 if (auto *Phi = dyn_cast<PHINode>(I)) 3990 BB = Phi->getIncomingBlock( 3991 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 3992 return BB == PredBB; 3993 }; 3994 3995 // Iteratively sink the scalarized operands of the predicated instruction 3996 // into the block we created for it. When an instruction is sunk, it's 3997 // operands are then added to the worklist. The algorithm ends after one pass 3998 // through the worklist doesn't sink a single instruction. 
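// For example, an instruction that feeds both the predicated instruction and
// another not-yet-sunk operand cannot be moved on the first pass; it is queued
// in InstsToReanalyze and reconsidered once its other user has been sunk.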
3999 bool Changed; 4000 do { 4001 // Add the instructions that need to be reanalyzed to the worklist, and 4002 // reset the changed indicator. 4003 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4004 InstsToReanalyze.clear(); 4005 Changed = false; 4006 4007 while (!Worklist.empty()) { 4008 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4009 4010 // We can't sink an instruction if it is a phi node, is already in the 4011 // predicated block, is not in the loop, or may have side effects. 4012 if (!I || isa<PHINode>(I) || I->getParent() == PredBB || 4013 !VectorLoop->contains(I) || I->mayHaveSideEffects()) 4014 continue; 4015 4016 // It's legal to sink the instruction if all its uses occur in the 4017 // predicated block. Otherwise, there's nothing to do yet, and we may 4018 // need to reanalyze the instruction. 4019 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4020 InstsToReanalyze.push_back(I); 4021 continue; 4022 } 4023 4024 // Move the instruction to the beginning of the predicated block, and add 4025 // it's operands to the worklist. 4026 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4027 Worklist.insert(I->op_begin(), I->op_end()); 4028 4029 // The sinking may have enabled other instructions to be sunk, so we will 4030 // need to iterate. 4031 Changed = true; 4032 } 4033 } while (Changed); 4034 } 4035 4036 void InnerLoopVectorizer::fixNonInductionPHIs() { 4037 for (PHINode *OrigPhi : OrigPHIsToFix) { 4038 PHINode *NewPhi = 4039 cast<PHINode>(VectorLoopValueMap.getVectorValue(OrigPhi, 0)); 4040 unsigned NumIncomingValues = OrigPhi->getNumIncomingValues(); 4041 4042 SmallVector<BasicBlock *, 2> ScalarBBPredecessors( 4043 predecessors(OrigPhi->getParent())); 4044 SmallVector<BasicBlock *, 2> VectorBBPredecessors( 4045 predecessors(NewPhi->getParent())); 4046 assert(ScalarBBPredecessors.size() == VectorBBPredecessors.size() && 4047 "Scalar and Vector BB should have the same number of predecessors"); 4048 4049 // The insertion point in Builder may be invalidated by the time we get 4050 // here. Force the Builder insertion point to something valid so that we do 4051 // not run into issues during insertion point restore in 4052 // getOrCreateVectorValue calls below. 4053 Builder.SetInsertPoint(NewPhi); 4054 4055 // The predecessor order is preserved and we can rely on mapping between 4056 // scalar and vector block predecessors. 4057 for (unsigned i = 0; i < NumIncomingValues; ++i) { 4058 BasicBlock *NewPredBB = VectorBBPredecessors[i]; 4059 4060 // When looking up the new scalar/vector values to fix up, use incoming 4061 // values from original phi. 4062 Value *ScIncV = 4063 OrigPhi->getIncomingValueForBlock(ScalarBBPredecessors[i]); 4064 4065 // Scalar incoming value may need a broadcast 4066 Value *NewIncV = getOrCreateVectorValue(ScIncV, 0); 4067 NewPhi->addIncoming(NewIncV, NewPredBB); 4068 } 4069 } 4070 } 4071 4072 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, unsigned UF, 4073 unsigned VF, bool IsPtrLoopInvariant, 4074 SmallBitVector &IsIndexLoopInvariant) { 4075 // Construct a vector GEP by widening the operands of the scalar GEP as 4076 // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP 4077 // results in a vector of pointers when at least one operand of the GEP 4078 // is vector-typed. Thus, to keep the representation compact, we only use 4079 // vector-typed operands for loop-varying values. 
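// For example, for "getelementptr inbounds i32, i32* %base, i64 %iv" with a
// loop-invariant %base and a loop-varying %iv, only the index operand is
// widened, yielding one GEP producing a <VF x i32*> vector of pointers per
// unroll part.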

  if (VF > 1 && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
    // If we are vectorizing, but the GEP has only loop-invariant operands,
    // the GEP we build (by only using vector-typed operands for
    // loop-varying values) would be a scalar pointer. Thus, to ensure we
    // produce a vector of pointers, we need to either arbitrarily pick an
    // operand to broadcast, or broadcast a clone of the original GEP.
    // Here, we broadcast a clone of the original.
    //
    // TODO: If at some point we decide to scalarize instructions having
    //       loop-invariant operands, this special case will no longer be
    //       required. We would add the scalarization decision to
    //       collectLoopScalars() and teach getVectorValue() to broadcast
    //       the lane-zero scalar value.
    auto *Clone = Builder.Insert(GEP->clone());
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
      VectorLoopValueMap.setVectorValue(GEP, Part, EntryPart);
      addMetadata(EntryPart, GEP);
    }
  } else {
    // If the GEP has at least one loop-varying operand, we are sure to
    // produce a vector of pointers. But if we are only unrolling, we want
    // to produce a scalar GEP for each unroll part. Thus, the GEP we
    // produce with the code below will be scalar (if VF == 1) or vector
    // (otherwise). Note that for the unroll-only case, we still maintain
    // values in the vector mapping with initVector, as we do for other
    // instructions.
    for (unsigned Part = 0; Part < UF; ++Part) {
      // The pointer operand of the new GEP. If it's loop-invariant, we
      // won't broadcast it.
      auto *Ptr = IsPtrLoopInvariant
                      ? GEP->getPointerOperand()
                      : getOrCreateVectorValue(GEP->getPointerOperand(), Part);

      // Collect all the indices for the new GEP. If any index is
      // loop-invariant, we won't broadcast it.
      SmallVector<Value *, 4> Indices;
      for (auto Index : enumerate(GEP->indices())) {
        Value *User = Index.value().get();
        if (IsIndexLoopInvariant[Index.index()])
          Indices.push_back(User);
        else
          Indices.push_back(getOrCreateVectorValue(User, Part));
      }

      // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
      // but it should be a vector otherwise.
      auto *NewGEP =
          GEP->isInBounds()
              ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr,
                                          Indices)
              : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices);
      assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
             "NewGEP is not a pointer vector");
      VectorLoopValueMap.setVectorValue(GEP, Part, NewGEP);
      addMetadata(NewGEP, GEP);
    }
  }
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF) {
  PHINode *P = cast<PHINode>(PN);
  if (EnableVPlanNativePath) {
    // Currently we enter here in the VPlan-native path for non-induction
    // PHIs where all control flow is uniform. We simply widen these PHIs.
    // Create a vector phi with no operands - the vector phi operands will be
    // set at the end of vector code generation.
    Type *VecTy =
        (VF == 1) ?
        PN->getType() : VectorType::get(PN->getType(), VF);
    Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
    VectorLoopValueMap.setVectorValue(P, 0, VecPhi);
    OrigPHIsToFix.push_back(P);

    return;
  }

  assert(PN->getParent() == OrigLoop->getHeader() &&
         "Non-header phis should have been handled elsewhere");

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. This is
  // stage #1: We create a new vector PHI node with no incoming edges. We'll use
  // this value when we vectorize all of the instructions that use the PHI.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    for (unsigned Part = 0; Part < UF; ++Part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Value *EntryPart = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
      VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
    }
    return;
  }

  setDebugLocFromInst(Builder, P);

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars().count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars().lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw
  // flags, which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
  case InductionDescriptor::IK_FpInduction:
    llvm_unreachable("Integer/fp induction is handled elsewhere.");
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
    for (unsigned Part = 0; Part < UF; ++Part) {
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep =
            emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
        SclrGep->setName("next.gep");
        VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
      }
    }
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::widenInstruction(Instruction &I, VPUser &User,
                                           VPTransformState &State) {
  switch (I.getOpcode()) {
  case Instruction::Call:
  case Instruction::Br:
  case Instruction::PHI:
  case Instruction::GetElementPtr:
  case Instruction::Select:
    llvm_unreachable("This instruction is handled by a different recipe.");
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::FNeg:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Just widen unary and binary operations.
    setDebugLocFromInst(Builder, &I);

    for (unsigned Part = 0; Part < UF; ++Part) {
      SmallVector<Value *, 2> Ops;
      for (VPValue *VPOp : User.operands())
        Ops.push_back(State.get(VPOp, Part));

      Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);

      if (auto *VecOp = dyn_cast<Instruction>(V))
        VecOp->copyIRFlags(&I);

      // Use this vector value for all users of the original instruction.
      VectorLoopValueMap.setVectorValue(&I, Part, V);
      addMetadata(V, &I);
    }

    break;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Widen compares. Generate vector compares.
    bool FCmp = (I.getOpcode() == Instruction::FCmp);
    auto *Cmp = cast<CmpInst>(&I);
    setDebugLocFromInst(Builder, Cmp);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *B = State.get(User.getOperand(1), Part);
      Value *C = nullptr;
      if (FCmp) {
        // Propagate fast math flags.
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        Builder.setFastMathFlags(Cmp->getFastMathFlags());
        C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
      } else {
        C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
      }
      VectorLoopValueMap.setVectorValue(&I, Part, C);
      addMetadata(C, &I);
    }

    break;
  }

  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto *CI = cast<CastInst>(&I);
    setDebugLocFromInst(Builder, CI);

    // Vectorize casts.
    Type *DestTy =
        (VF == 1) ?
        CI->getType() : VectorType::get(CI->getType(), VF);

    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *A = State.get(User.getOperand(0), Part);
      Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
      VectorLoopValueMap.setVectorValue(&I, Part, Cast);
      addMetadata(Cast, &I);
    }
    break;
  }
  default:
    // This instruction is not vectorized by simple widening.
    LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
    llvm_unreachable("Unhandled instruction!");
  } // end of switch.
}

void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPUser &ArgOperands,
                                               VPTransformState &State) {
  assert(!isa<DbgInfoIntrinsic>(I) &&
         "DbgInfoIntrinsic should have been dropped during VPlan construction");
  setDebugLocFromInst(Builder, &I);

  Module *M = I.getParent()->getParent()->getParent();
  auto *CI = cast<CallInst>(&I);

  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->arg_operands())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

  // The flag indicates whether we use an intrinsic or an ordinary call for the
  // vectorized version of the instruction, i.e. whether the intrinsic call is
  // cheaper than the library call.
  bool NeedToScalarize = false;
  unsigned CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
  bool UseVectorIntrinsic =
      ID && Cost->getVectorIntrinsicCost(CI, VF) <= CallCost;
  assert((UseVectorIntrinsic || !NeedToScalarize) &&
         "Instruction should be scalarized elsewhere.");

  for (unsigned Part = 0; Part < UF; ++Part) {
    SmallVector<Value *, 4> Args;
    for (auto &I : enumerate(ArgOperands.operands())) {
      // Some intrinsics have a scalar argument - don't replace it with a
      // vector.
      Value *Arg;
      if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
        Arg = State.get(I.value(), Part);
      else
        Arg = State.get(I.value(), {0, 0});
      Args.push_back(Arg);
    }

    Function *VectorF;
    if (UseVectorIntrinsic) {
      // Use the vector version of the intrinsic.
      Type *TysForDecl[] = {CI->getType()};
      if (VF > 1)
        TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
      VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
      assert(VectorF && "Can't retrieve vector intrinsic.");
    } else {
      // Use the vector version of the function call.
      const VFShape Shape =
          VFShape::get(*CI, {VF, false} /*EC*/, false /*HasGlobalPred*/);
#ifndef NDEBUG
      assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
             "Can't create vector function.");
#endif
      VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
    }
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

    if (isa<FPMathOperator>(V))
      V->copyFastMathFlags(CI);

    VectorLoopValueMap.setVectorValue(&I, Part, V);
    addMetadata(V, &I);
  }
}

void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I,
                                                 bool InvariantCond) {
  setDebugLocFromInst(Builder, &I);

  // The condition can be loop invariant but still defined inside the
  // loop. This means that we can't just use the original 'cond' value.
  // We have to take the 'vectorized' value and pick the first lane.
  // InstCombine will make this a no-op.

  auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});

  for (unsigned Part = 0; Part < UF; ++Part) {
    Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
    Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
    Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
    Value *Sel =
        Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
    VectorLoopValueMap.setVectorValue(&I, Part, Sel);
    addMetadata(Sel, &I);
  }
}

void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
  // We should not collect Scalars more than once per VF. Right now, this
  // function is called from collectUniformsAndScalars(), which already does
  // this check. Collecting Scalars for VF=1 does not make any sense.
  assert(VF >= 2 && Scalars.find(VF) == Scalars.end() &&
         "This function should not be visited twice for the same VF");

  SmallSetVector<Instruction *, 8> Worklist;

  // These sets are used to seed the analysis with pointers used by memory
  // accesses that will remain scalar.
  SmallSetVector<Instruction *, 8> ScalarPtrs;
  SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;

  // A helper that returns true if the use of Ptr by MemAccess will be scalar.
  // The pointer operands of loads and stores will be scalar as long as the
  // memory access is not a gather or scatter operation. The value operand of a
  // store will remain scalar if the store is scalarized.
  auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
    InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");
    if (auto *Store = dyn_cast<StoreInst>(MemAccess))
      if (Ptr == Store->getValueOperand())
        return WideningDecision == CM_Scalarize;
    assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
           "Ptr is neither a value nor a pointer operand");
    return WideningDecision != CM_GatherScatter;
  };

  // A helper that returns true if the given value is a bitcast or
  // getelementptr instruction contained in the loop.
  auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
    return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
            isa<GetElementPtrInst>(V)) &&
           !TheLoop->isLoopInvariant(V);
  };

  // A helper that evaluates a memory access's use of a pointer. If the use
  // will be a scalar use, and the pointer is only used by memory accesses, we
  // place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
  // PossibleNonScalarPtrs.
  auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
    // We only care about bitcast and getelementptr instructions contained in
    // the loop.
    if (!isLoopVaryingBitCastOrGEP(Ptr))
      return;

    // If the pointer has already been identified as scalar (e.g., if it was
    // also identified as uniform), there's nothing to do.
    auto *I = cast<Instruction>(Ptr);
    if (Worklist.count(I))
      return;

    // If the use of the pointer will be a scalar use, and all users of the
    // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
    // place the pointer in PossibleNonScalarPtrs.
    if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
          return isa<LoadInst>(U) || isa<StoreInst>(U);
        }))
      ScalarPtrs.insert(I);
    else
      PossibleNonScalarPtrs.insert(I);
  };

  // We seed the scalars analysis with three classes of instructions: (1)
  // instructions marked uniform-after-vectorization, (2) bitcast and
  // getelementptr instructions used by memory accesses requiring a scalar use,
  // and (3) pointer induction variables and their update instructions (we
  // currently only scalarize these).
  //
  // (1) Add to the worklist all instructions that have been identified as
  // uniform-after-vectorization.
  Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());

  // (2) Add to the worklist all bitcast and getelementptr instructions used by
  // memory accesses requiring a scalar use. The pointer operands of loads and
  // stores will be scalar as long as the memory access is not a gather or
  // scatter operation. The value operand of a store will remain scalar if the
  // store is scalarized.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *Load = dyn_cast<LoadInst>(&I)) {
        evaluatePtrUse(Load, Load->getPointerOperand());
      } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
        evaluatePtrUse(Store, Store->getPointerOperand());
        evaluatePtrUse(Store, Store->getValueOperand());
      }
    }
  for (auto *I : ScalarPtrs)
    if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
      Worklist.insert(I);
    }

  // (3) Add to the worklist all pointer induction variables and their update
  // instructions.
  //
  // TODO: Once we are able to vectorize pointer induction variables we should
  // no longer insert them into the worklist here.
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
    if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
      continue;
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  // Insert the forced scalars.
  // FIXME: Currently widenPHIInstruction() often creates a dead vector
  // induction variable when the PHI user is scalarized.
  auto ForcedScalar = ForcedScalars.find(VF);
  if (ForcedScalar != ForcedScalars.end())
    for (auto *I : ForcedScalar->second)
      Worklist.insert(I);

  // Expand the worklist by looking through any bitcasts and getelementptr
  // instructions we've already identified as scalar. This is similar to the
  // expansion step in collectLoopUniforms(); however, here we're only
  // expanding to include additional bitcasts and getelementptr instructions.
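  //
  // Illustrative example (not from the original source): if a scalarized
  // load uses %c = bitcast i8* %g to i32*, and the GEP %g feeding the
  // bitcast is used only by %c (already in the worklist), then %g is added
  // to the worklist as well.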
  unsigned Idx = 0;
  while (Idx != Worklist.size()) {
    Instruction *Dst = Worklist[Idx++];
    if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
      continue;
    auto *Src = cast<Instruction>(Dst->getOperand(0));
    if (llvm::all_of(Src->users(), [&](User *U) -> bool {
          auto *J = cast<Instruction>(U);
          return !TheLoop->contains(J) || Worklist.count(J) ||
                 ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
                  isScalarUse(J, Src));
        })) {
      Worklist.insert(Src);
      LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
    }
  }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // We already considered pointer induction variables, so there's no reason
    // to look at their users again.
    //
    // TODO: Once we are able to vectorize pointer induction variables we
    // should no longer skip over them here.
    if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
      continue;

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
        });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
    LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
                      << "\n");
  }

  Scalars[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
                                                         unsigned VF) {
  if (!blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
  case Instruction::Store: {
    if (!Legal->isMaskRequired(I))
      return false;
    auto *Ptr = getLoadStorePointerOperand(I);
    auto *Ty = getMemInstValueType(I);
    // We have already decided how to vectorize this instruction, get that
    // result.
    if (VF > 1) {
      InstWidening WideningDecision = getWideningDecision(I, VF);
      assert(WideningDecision != CM_Unknown &&
             "Widening decision should be ready at this moment");
      return WideningDecision == CM_Scalarize;
    }
    const MaybeAlign Alignment = getLoadStoreAlignment(I);
    return isa<LoadInst>(I) ?
               !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
                 isLegalMaskedGather(Ty, Alignment))
               : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
                   isLegalMaskedScatter(Ty, Alignment));
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(Instruction *I,
                                                               unsigned VF) {
  assert(isAccessInterleaved(I) && "Expecting interleaved access.");
  assert(getWideningDecision(I, VF) == CM_Unknown &&
         "Decision should not be set yet.");
  auto *Group = getInterleavedAccessGroup(I);
  assert(Group && "Must have a group.");

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = getMemInstValueType(I);
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  // Check if masking is required.
  // A group may need masking for one of two reasons: it resides in a block
  // that needs predication, or it was decided to use masking to deal with
  // gaps.
  bool PredicatedAccessRequiresMasking =
      Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I);
  bool AccessWithGapsRequiresMasking =
      Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed();
  if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking)
    return true;

  // If masked interleaving is required, we expect that the user/target had
  // enabled it, because otherwise it either wouldn't have been created or
  // it should have been invalidated by the CostModel.
  assert(useMaskedInterleavedAccesses(TTI) &&
         "Masked interleave-groups for predicated accesses are not enabled.");

  auto *Ty = getMemInstValueType(I);
  const MaybeAlign Alignment = getLoadStoreAlignment(I);
  return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
                          : TTI.isLegalMaskedStore(Ty, Alignment);
}

bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
                                                               unsigned VF) {
  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  auto *Ptr = getLoadStorePointerOperand(I);

  // In order to be widened, the pointer should be consecutive, first of all.
  if (!Legal->isConsecutivePtr(Ptr))
    return false;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
  if (isScalarWithPredication(I))
    return false;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return false;

  return true;
}

void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
  // We should not collect Uniforms more than once per VF. Right now,
  // this function is called from collectUniformsAndScalars(), which
  // already does this check. Collecting Uniforms for VF=1 does not make any
  // sense.

  assert(VF >= 2 && Uniforms.find(VF) == Uniforms.end() &&
         "This function should not be visited twice for the same VF");

  // Visit the list of Uniforms. Even if we do not find any uniform value, we
  // will not analyze the loop again: Uniforms.count(VF) will return 1.
  Uniforms[VF].clear();

  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Instructions that are scalar with predication must not be considered
  // uniform after vectorization, because that would create an erroneous
  // replicating region where only a single instance out of VF should be
  // formed.
  // TODO: optimize such seldom cases if found important, see PR40816.
  auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
    if (isScalarWithPredication(I, VF)) {
      LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
                        << *I << "\n");
      return;
    }
    LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
    Worklist.insert(I);
  };

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
    addToWorklistIfAllowed(Cmp);

  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  // are pointers that are treated like consecutive pointers during
  // vectorization. The pointer operands of interleaved accesses are an
  // example.
  SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;

  // Holds pointer operands of instructions that are possibly non-uniform.
  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;

  auto isUniformDecision = [&](Instruction *I, unsigned VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };
  // Iterate over the instructions in the loop, and collect all
  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  // that a consecutive-like pointer operand will be scalarized, we collect it
  // in PossibleNonUniformPtrs instead. We use two sets here because a single
  // getelementptr instruction can be used by both vectorized and scalarized
  // memory instructions. For example, if a loop loads and stores from the same
  // location, but the store is conditional, the store will be scalarized, and
  // the getelementptr won't remain uniform.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (!Ptr)
        continue;

      // True if all users of Ptr are memory accesses that have Ptr as their
      // pointer operand.
      auto UsersAreMemAccesses =
          llvm::all_of(Ptr->users(), [&](User *U) -> bool {
            return getLoadStorePointerOperand(U) == Ptr;
          });

      // Ensure the memory instruction will not be scalarized or used by
      // gather/scatter, making its pointer operand non-uniform. If the pointer
      // operand is used by any instruction other than a memory access, we
      // conservatively assume the pointer operand may be non-uniform.
      if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
        PossibleNonUniformPtrs.insert(Ptr);

      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like, or interleaving - the pointer operand should
      // remain uniform.
      else
        ConsecutiveLikePtrs.insert(Ptr);
    }

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.
  for (auto *V : ConsecutiveLikePtrs)
    if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end())
      addToWorklistIfAllowed(V);

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First-order recurrence Phis should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) ||
                   (OI == getLoadStorePointerOperand(J) &&
                    isUniformDecision(J, VF));
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");

  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check is required with -Os/-Oz",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

Optional<unsigned> LoopVectorizationCostModel::computeMaxVF() {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since the check is still likely to be
    // dynamically uniform if the target can skip it.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. "
        "Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return None;
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC);
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return None;
    break;
  }

  // Now try tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  unsigned MaxVF = computeFeasibleMaxVF(TC);
  if (TC > 0 && TC % MaxVF == 0) {
    // Accept MaxVF if we do not have a tail.
    LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
    return MaxVF;
  }

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxVF;
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return None;
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return None;
}

unsigned
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();

  WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);

  unsigned MaxVectorSize = WidestRegister / WidestType;

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << WidestRegister << " bits.\n");

  assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements"
                                 " into one vector!");
  if (MaxVectorSize == 0) {
    LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
    return MaxVectorSize;
  } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
             isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to be the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below.
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                      << ConstTripCount << "\n");
    MaxVectorSize = ConstTripCount;
    return MaxVectorSize;
  }

  unsigned MaxVF = MaxVectorSize;
  if (TTI.shouldMaximizeVectorBandwidth(!isScalarEpilogueAllowed()) ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorSize).
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (unsigned MinVF = TTI.getMinimumVF(SmallestType)) {
      if (MaxVF < MinVF) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
  float Cost = expectedCost(1).first;
  const float ScalarCost = Cost;
  unsigned Width = 1;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && MaxVF > 1) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    Cost = std::numeric_limits<float>::max();
  }

  for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    Width = 1;
    Cost = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = Legal->getReductionVars()[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store will
      //        be vectorized. We only know this for certain after a VF has
      //        been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(unsigned VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we can't
  // predict at this level. For example, frontend pressure (on decode or fetch)
  // due to code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // Do not interleave if there is a restricting dependence distance; it has
  // already been used to limit the vectorization factor.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small known or estimated trip
  // count.
  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  if (BestKnownTC && *BestKnownTC < TinyTripCountInterleaveThreshold)
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want power of two interleave count to simplify any
  // addressing operations or alignment considerations.
  // We also want power of two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero, when tail is folded by masking;
  // this currently happens when OptForSize, in which case IC is set to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF == 1) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC =
        PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
    // Don't count the induction variable as interleaved.
    if (EnableIndVarRegisterHeur) {
      TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                            std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If the trip count is a known or estimated compile-time constant, limit the
  // interleave count to be less than the trip count divided by VF.
  if (BestKnownTC) {
    MaxInterleaveCount = std::min(*BestKnownTC / VF, MaxInterleaveCount);
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  assert(LoopCost && "Non-zero loop cost expected");

  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && !Legal->getReductionVars().empty()) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
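  //
  // Illustrative example (hypothetical numbers): if the loop cost were 4 and
  // SmallLoopCost were 20, the clamp below would give
  // SmallIC = min(IC, PowerOf2Floor(20 / 4)) = min(IC, 4).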
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default to 2, so
    // the critical path only gets increased by one reduction operation.
    if (!Legal->getReductionVars().empty() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = !Legal->getReductionVars().empty();
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi map that holds the list of
  // intervals that *end* at a specific location. This multi map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set. We also search for instructions that
  // are defined outside the loop, but are used inside the loop.
  // We need this number separately from the max-interval usage number because,
  // when we unroll, loop-invariant values do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (Ends.find(I) == Ends.end())
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
      continue;

    // For each VF find the maximum usage of registers.
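    // Illustrative example (hypothetical numbers): with a 128-bit widest
    // register, an i32 value at VF = 8 contributes
    // GetRegUsage(i32, 8) = max(1, 8 * 32 / 128) = 2 registers to its class.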
5496 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5497 // Count the number of live intervals. 5498 SmallMapVector<unsigned, unsigned, 4> RegUsage; 5499 5500 if (VFs[j] == 1) { 5501 for (auto Inst : OpenIntervals) { 5502 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5503 if (RegUsage.find(ClassID) == RegUsage.end()) 5504 RegUsage[ClassID] = 1; 5505 else 5506 RegUsage[ClassID] += 1; 5507 } 5508 } else { 5509 collectUniformsAndScalars(VFs[j]); 5510 for (auto Inst : OpenIntervals) { 5511 // Skip ignored values for VF > 1. 5512 if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end()) 5513 continue; 5514 if (isScalarAfterVectorization(Inst, VFs[j])) { 5515 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5516 if (RegUsage.find(ClassID) == RegUsage.end()) 5517 RegUsage[ClassID] = 1; 5518 else 5519 RegUsage[ClassID] += 1; 5520 } else { 5521 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 5522 if (RegUsage.find(ClassID) == RegUsage.end()) 5523 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 5524 else 5525 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 5526 } 5527 } 5528 } 5529 5530 for (auto& pair : RegUsage) { 5531 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 5532 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 5533 else 5534 MaxUsages[j][pair.first] = pair.second; 5535 } 5536 } 5537 5538 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 5539 << OpenIntervals.size() << '\n'); 5540 5541 // Add the current instruction to the list of open intervals. 5542 OpenIntervals.insert(I); 5543 } 5544 5545 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 5546 SmallMapVector<unsigned, unsigned, 4> Invariant; 5547 5548 for (auto Inst : LoopInvariants) { 5549 unsigned Usage = VFs[i] == 1 ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 5550 unsigned ClassID = TTI.getRegisterClassForType(VFs[i] > 1, Inst->getType()); 5551 if (Invariant.find(ClassID) == Invariant.end()) 5552 Invariant[ClassID] = Usage; 5553 else 5554 Invariant[ClassID] += Usage; 5555 } 5556 5557 LLVM_DEBUG({ 5558 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 5559 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 5560 << " item\n"; 5561 for (const auto &pair : MaxUsages[i]) { 5562 dbgs() << "LV(REG): RegisterClass: " 5563 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5564 << " registers\n"; 5565 } 5566 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 5567 << " item\n"; 5568 for (const auto &pair : Invariant) { 5569 dbgs() << "LV(REG): RegisterClass: " 5570 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 5571 << " registers\n"; 5572 } 5573 }); 5574 5575 RU.LoopInvariantRegs = Invariant; 5576 RU.MaxLocalUsers = MaxUsages[i]; 5577 RUs[i] = RU; 5578 } 5579 5580 return RUs; 5581 } 5582 5583 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 5584 // TODO: Cost model for emulated masked load/store is completely 5585 // broken. This hack guides the cost model to use an artificially 5586 // high enough value to practically disable vectorization with such 5587 // operations, except where previously deployed legality hack allowed 5588 // using very low cost values. This is to avoid regressions coming simply 5589 // from moving "masked load/store" check from legality to cost model. 5590 // Masked Load/Gather emulation was previously never allowed. 5591 // Limited number of Masked Store/Scatter emulation was allowed. 
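// When this hook returns true, getMemInstScalarizationCost below pins the
// scalarization cost to an artificially large constant (3000000), so
// vectorization with such emulated masked memrefs is effectively disabled.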
5592 assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction"); 5593 return isa<LoadInst>(I) || 5594 (isa<StoreInst>(I) && 5595 NumPredStores > NumberOfStoresToPredicate); 5596 } 5597 5598 void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) { 5599 // If we aren't vectorizing the loop, or if we've already collected the 5600 // instructions to scalarize, there's nothing to do. Collection may already 5601 // have occurred if we have a user-selected VF and are now computing the 5602 // expected cost for interleaving. 5603 if (VF < 2 || InstsToScalarize.find(VF) != InstsToScalarize.end()) 5604 return; 5605 5606 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 5607 // not profitable to scalarize any instructions, the presence of VF in the 5608 // map will indicate that we've analyzed it already. 5609 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 5610 5611 // Find all the instructions that are scalar with predication in the loop and 5612 // determine if it would be better to not if-convert the blocks they are in. 5613 // If so, we also record the instructions to scalarize. 5614 for (BasicBlock *BB : TheLoop->blocks()) { 5615 if (!blockNeedsPredication(BB)) 5616 continue; 5617 for (Instruction &I : *BB) 5618 if (isScalarWithPredication(&I)) { 5619 ScalarCostsTy ScalarCosts; 5620 // Do not apply discount logic if hacked cost is needed 5621 // for emulated masked memrefs. 5622 if (!useEmulatedMaskMemRefHack(&I) && 5623 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 5624 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 5625 // Remember that BB will remain after vectorization. 5626 PredicatedBBsAfterVectorization.insert(BB); 5627 } 5628 } 5629 } 5630 5631 int LoopVectorizationCostModel::computePredInstDiscount( 5632 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 5633 unsigned VF) { 5634 assert(!isUniformAfterVectorization(PredInst, VF) && 5635 "Instruction marked uniform-after-vectorization will be predicated"); 5636 5637 // Initialize the discount to zero, meaning that the scalar version and the 5638 // vector version cost the same. 5639 int Discount = 0; 5640 5641 // Holds instructions to analyze. The instructions we visit are mapped in 5642 // ScalarCosts. Those instructions are the ones that would be scalarized if 5643 // we find that the scalar version costs less. 5644 SmallVector<Instruction *, 8> Worklist; 5645 5646 // Returns true if the given instruction can be scalarized. 5647 auto canBeScalarized = [&](Instruction *I) -> bool { 5648 // We only attempt to scalarize instructions forming a single-use chain 5649 // from the original predicated block that would otherwise be vectorized. 5650 // Although not strictly necessary, we give up on instructions we know will 5651 // already be scalar to avoid traversing chains that are unlikely to be 5652 // beneficial. 5653 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 5654 isScalarAfterVectorization(I, VF)) 5655 return false; 5656 5657 // If the instruction is scalar with predication, it will be analyzed 5658 // separately. We ignore it within the context of PredInst. 5659 if (isScalarWithPredication(I)) 5660 return false; 5661 5662 // If any of the instruction's operands are uniform after vectorization, 5663 // the instruction cannot be scalarized. This prevents, for example, a 5664 // masked load from being scalarized. 
5665 // 5666 // We assume we will only emit a value for lane zero of an instruction 5667 // marked uniform after vectorization, rather than VF identical values. 5668 // Thus, if we scalarize an instruction that uses a uniform, we would 5669 // create uses of values corresponding to the lanes we aren't emitting code 5670 // for. This behavior can be changed by allowing getScalarValue to clone 5671 // the lane zero values for uniforms rather than asserting. 5672 for (Use &U : I->operands()) 5673 if (auto *J = dyn_cast<Instruction>(U.get())) 5674 if (isUniformAfterVectorization(J, VF)) 5675 return false; 5676 5677 // Otherwise, we can scalarize the instruction. 5678 return true; 5679 }; 5680 5681 // Compute the expected cost discount from scalarizing the entire expression 5682 // feeding the predicated instruction. We currently only consider expressions 5683 // that are single-use instruction chains. 5684 Worklist.push_back(PredInst); 5685 while (!Worklist.empty()) { 5686 Instruction *I = Worklist.pop_back_val(); 5687 5688 // If we've already analyzed the instruction, there's nothing to do. 5689 if (ScalarCosts.find(I) != ScalarCosts.end()) 5690 continue; 5691 5692 // Compute the cost of the vector instruction. Note that this cost already 5693 // includes the scalarization overhead of the predicated instruction. 5694 unsigned VectorCost = getInstructionCost(I, VF).first; 5695 5696 // Compute the cost of the scalarized instruction. This cost is the cost of 5697 // the instruction as if it wasn't if-converted and instead remained in the 5698 // predicated block. We will scale this cost by block probability after 5699 // computing the scalarization overhead. 5700 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 5701 5702 // Compute the scalarization overhead of needed insertelement instructions 5703 // and phi nodes. 5704 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 5705 ScalarCost += TTI.getScalarizationOverhead( 5706 cast<VectorType>(ToVectorTy(I->getType(), VF)), 5707 APInt::getAllOnesValue(VF), true, false); 5708 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 5709 } 5710 5711 // Compute the scalarization overhead of needed extractelement 5712 // instructions. For each of the instruction's operands, if the operand can 5713 // be scalarized, add it to the worklist; otherwise, account for the 5714 // overhead. 5715 for (Use &U : I->operands()) 5716 if (auto *J = dyn_cast<Instruction>(U.get())) { 5717 assert(VectorType::isValidElementType(J->getType()) && 5718 "Instruction has non-scalar type"); 5719 if (canBeScalarized(J)) 5720 Worklist.push_back(J); 5721 else if (needsExtract(J, VF)) 5722 ScalarCost += TTI.getScalarizationOverhead( 5723 cast<VectorType>(ToVectorTy(J->getType(), VF)), 5724 APInt::getAllOnesValue(VF), false, true); 5725 } 5726 5727 // Scale the total scalar cost by block probability. 5728 ScalarCost /= getReciprocalPredBlockProb(); 5729 5730 // Compute the discount. A non-negative discount means the vector version 5731 // of the instruction costs more, and scalarizing would be beneficial. 5732 Discount += VectorCost - ScalarCost; 5733 ScalarCosts[I] = ScalarCost; 5734 } 5735 5736 return Discount; 5737 } 5738 5739 LoopVectorizationCostModel::VectorizationCostTy 5740 LoopVectorizationCostModel::expectedCost(unsigned VF) { 5741 VectorizationCostTy Cost; 5742 5743 // For each block. 5744 for (BasicBlock *BB : TheLoop->blocks()) { 5745 VectorizationCostTy BlockCost; 5746 5747 // For each instruction in the old loop. 
5748 for (Instruction &I : BB->instructionsWithoutDebug()) { 5749 // Skip ignored values. 5750 if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() || 5751 (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end())) 5752 continue; 5753 5754 VectorizationCostTy C = getInstructionCost(&I, VF); 5755 5756 // Check if we should override the cost. 5757 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 5758 C.first = ForceTargetInstructionCost; 5759 5760 BlockCost.first += C.first; 5761 BlockCost.second |= C.second; 5762 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 5763 << " for VF " << VF << " For instruction: " << I 5764 << '\n'); 5765 } 5766 5767 // If we are vectorizing a predicated block, it will have been 5768 // if-converted. This means that the block's instructions (aside from 5769 // stores and instructions that may divide by zero) will now be 5770 // unconditionally executed. For the scalar case, we may not always execute 5771 // the predicated block. Thus, scale the block's cost by the probability of 5772 // executing it. 5773 if (VF == 1 && blockNeedsPredication(BB)) 5774 BlockCost.first /= getReciprocalPredBlockProb(); 5775 5776 Cost.first += BlockCost.first; 5777 Cost.second |= BlockCost.second; 5778 } 5779 5780 return Cost; 5781 } 5782 5783 /// Gets Address Access SCEV after verifying that the access pattern 5784 /// is loop invariant except the induction variable dependence. 5785 /// 5786 /// This SCEV can be sent to the Target in order to estimate the address 5787 /// calculation cost. 5788 static const SCEV *getAddressAccessSCEV( 5789 Value *Ptr, 5790 LoopVectorizationLegality *Legal, 5791 PredicatedScalarEvolution &PSE, 5792 const Loop *TheLoop) { 5793 5794 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 5795 if (!Gep) 5796 return nullptr; 5797 5798 // We are looking for a gep with all loop invariant indices except for one 5799 // which should be an induction variable. 5800 auto SE = PSE.getSE(); 5801 unsigned NumOperands = Gep->getNumOperands(); 5802 for (unsigned i = 1; i < NumOperands; ++i) { 5803 Value *Opd = Gep->getOperand(i); 5804 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 5805 !Legal->isInductionVariable(Opd)) 5806 return nullptr; 5807 } 5808 5809 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 5810 return PSE.getSCEV(Ptr); 5811 } 5812 5813 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 5814 return Legal->hasStride(I->getOperand(0)) || 5815 Legal->hasStride(I->getOperand(1)); 5816 } 5817 5818 unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 5819 unsigned VF) { 5820 assert(VF > 1 && "Scalarization cost of instruction implies vectorization."); 5821 Type *ValTy = getMemInstValueType(I); 5822 auto SE = PSE.getSE(); 5823 5824 unsigned AS = getLoadStoreAddressSpace(I); 5825 Value *Ptr = getLoadStorePointerOperand(I); 5826 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 5827 5828 // Figure out whether the access is strided and get the stride value 5829 // if it's known in compile time 5830 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 5831 5832 // Get the cost of the scalar memory instruction and address computation. 5833 unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 5834 5835 // Don't pass *I here, since it is scalar but will actually be part of a 5836 // vectorized loop where the user of it is a vectorized instruction. 
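// Illustrative breakdown with hypothetical per-element costs: at VF = 4, an
// address-computation cost of 1 and a scalar load cost of 1 give a base cost
// of 4 * 1 + 4 * 1 = 8; the extract/insert scalarization overhead is then
// added, and if the access is predicated the total is divided by the
// reciprocal predicated-block probability.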
5837 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5838 Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 5839 Alignment, AS, 5840 TTI::TCK_RecipThroughput); 5841 5842 // Get the overhead of the extractelement and insertelement instructions 5843 // we might create due to scalarization. 5844 Cost += getScalarizationOverhead(I, VF); 5845 5846 // If we have a predicated store, it may not be executed for each vector 5847 // lane. Scale the cost by the probability of executing the predicated 5848 // block. 5849 if (isPredicatedInst(I)) { 5850 Cost /= getReciprocalPredBlockProb(); 5851 5852 if (useEmulatedMaskMemRefHack(I)) 5853 // Artificially setting to a high enough value to practically disable 5854 // vectorization with such operations. 5855 Cost = 3000000; 5856 } 5857 5858 return Cost; 5859 } 5860 5861 unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 5862 unsigned VF) { 5863 Type *ValTy = getMemInstValueType(I); 5864 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5865 Value *Ptr = getLoadStorePointerOperand(I); 5866 unsigned AS = getLoadStoreAddressSpace(I); 5867 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 5868 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 5869 5870 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 5871 "Stride should be 1 or -1 for consecutive memory access"); 5872 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5873 unsigned Cost = 0; 5874 if (Legal->isMaskRequired(I)) 5875 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, 5876 Alignment ? Alignment->value() : 0, AS, 5877 CostKind); 5878 else 5879 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 5880 CostKind, I); 5881 5882 bool Reverse = ConsecutiveStride < 0; 5883 if (Reverse) 5884 Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5885 return Cost; 5886 } 5887 5888 unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 5889 unsigned VF) { 5890 Type *ValTy = getMemInstValueType(I); 5891 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5892 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5893 unsigned AS = getLoadStoreAddressSpace(I); 5894 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 5895 if (isa<LoadInst>(I)) { 5896 return TTI.getAddressComputationCost(ValTy) + 5897 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 5898 CostKind) + 5899 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 5900 } 5901 StoreInst *SI = cast<StoreInst>(I); 5902 5903 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 5904 return TTI.getAddressComputationCost(ValTy) + 5905 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 5906 CostKind) + 5907 (isLoopInvariantStoreValue 5908 ? 0 5909 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 5910 VF - 1)); 5911 } 5912 5913 unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 5914 unsigned VF) { 5915 Type *ValTy = getMemInstValueType(I); 5916 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5917 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5918 Value *Ptr = getLoadStorePointerOperand(I); 5919 5920 return TTI.getAddressComputationCost(VectorTy) + 5921 TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr, 5922 Legal->isMaskRequired(I), 5923 Alignment ? 
Alignment->value() : 0, 5924 TargetTransformInfo::TCK_RecipThroughput, 5925 I); 5926 } 5927 5928 unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 5929 unsigned VF) { 5930 Type *ValTy = getMemInstValueType(I); 5931 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 5932 unsigned AS = getLoadStoreAddressSpace(I); 5933 5934 auto Group = getInterleavedAccessGroup(I); 5935 assert(Group && "Fail to get an interleaved access group."); 5936 5937 unsigned InterleaveFactor = Group->getFactor(); 5938 VectorType *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 5939 5940 // Holds the indices of existing members in an interleaved load group. 5941 // An interleaved store group doesn't need this as it doesn't allow gaps. 5942 SmallVector<unsigned, 4> Indices; 5943 if (isa<LoadInst>(I)) { 5944 for (unsigned i = 0; i < InterleaveFactor; i++) 5945 if (Group->getMember(i)) 5946 Indices.push_back(i); 5947 } 5948 5949 // Calculate the cost of the whole interleaved group. 5950 bool UseMaskForGaps = 5951 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5952 unsigned Cost = TTI.getInterleavedMemoryOpCost( 5953 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, 5954 Group->getAlign().value(), AS, TTI::TCK_RecipThroughput, 5955 Legal->isMaskRequired(I), UseMaskForGaps); 5956 5957 if (Group->isReverse()) { 5958 // TODO: Add support for reversed masked interleaved access. 5959 assert(!Legal->isMaskRequired(I) && 5960 "Reverse masked interleaved access not supported."); 5961 Cost += Group->getNumMembers() * 5962 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0); 5963 } 5964 return Cost; 5965 } 5966 5967 unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 5968 unsigned VF) { 5969 // Calculate scalar cost only. Vectorization cost should be ready at this 5970 // moment. 5971 if (VF == 1) { 5972 Type *ValTy = getMemInstValueType(I); 5973 const MaybeAlign Alignment = getLoadStoreAlignment(I); 5974 unsigned AS = getLoadStoreAddressSpace(I); 5975 5976 return TTI.getAddressComputationCost(ValTy) + 5977 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 5978 TTI::TCK_RecipThroughput, I); 5979 } 5980 return getWideningCost(I, VF); 5981 } 5982 5983 LoopVectorizationCostModel::VectorizationCostTy 5984 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 5985 // If we know that this instruction will remain uniform, check the cost of 5986 // the scalar version. 5987 if (isUniformAfterVectorization(I, VF)) 5988 VF = 1; 5989 5990 if (VF > 1 && isProfitableToScalarize(I, VF)) 5991 return VectorizationCostTy(InstsToScalarize[VF][I], false); 5992 5993 // Forced scalars do not have any scalarization overhead. 
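// (A typical case, per setCostBasedWideningDecision below: a value that only
// feeds scalar address computations is forced scalar, and its cost is simply
// VF copies of its scalar cost with no extract/insert overhead added.)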
5994 auto ForcedScalar = ForcedScalars.find(VF); 5995 if (VF > 1 && ForcedScalar != ForcedScalars.end()) { 5996 auto InstSet = ForcedScalar->second; 5997 if (InstSet.find(I) != InstSet.end()) 5998 return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false); 5999 } 6000 6001 Type *VectorTy; 6002 unsigned C = getInstructionCost(I, VF, VectorTy); 6003 6004 bool TypeNotScalarized = 6005 VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF; 6006 return VectorizationCostTy(C, TypeNotScalarized); 6007 } 6008 6009 unsigned LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6010 unsigned VF) { 6011 6012 if (VF == 1) 6013 return 0; 6014 6015 unsigned Cost = 0; 6016 Type *RetTy = ToVectorTy(I->getType(), VF); 6017 if (!RetTy->isVoidTy() && 6018 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6019 Cost += TTI.getScalarizationOverhead( 6020 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF), true, false); 6021 6022 // Some targets keep addresses scalar. 6023 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6024 return Cost; 6025 6026 // Some targets support efficient element stores. 6027 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6028 return Cost; 6029 6030 // Collect operands to consider. 6031 CallInst *CI = dyn_cast<CallInst>(I); 6032 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 6033 6034 // Skip operands that do not require extraction/scalarization and do not incur 6035 // any overhead. 6036 return Cost + TTI.getOperandsScalarizationOverhead( 6037 filterExtractingOperands(Ops, VF), VF); 6038 } 6039 6040 void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) { 6041 if (VF == 1) 6042 return; 6043 NumPredStores = 0; 6044 for (BasicBlock *BB : TheLoop->blocks()) { 6045 // For each instruction in the old loop. 6046 for (Instruction &I : *BB) { 6047 Value *Ptr = getLoadStorePointerOperand(&I); 6048 if (!Ptr) 6049 continue; 6050 6051 // TODO: We should generate better code and update the cost model for 6052 // predicated uniform stores. Today they are treated as any other 6053 // predicated store (see added test cases in 6054 // invariant-store-vectorization.ll). 6055 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 6056 NumPredStores++; 6057 6058 if (Legal->isUniform(Ptr) && 6059 // Conditional loads and stores should be scalarized and predicated. 6060 // isScalarWithPredication cannot be used here since masked 6061 // gather/scatters are not considered scalar with predication. 6062 !Legal->blockNeedsPredication(I.getParent())) { 6063 // TODO: Avoid replicating loads and stores instead of 6064 // relying on instcombine to remove them. 6065 // Load: Scalar load + broadcast 6066 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6067 unsigned Cost = getUniformMemOpCost(&I, VF); 6068 setWideningDecision(&I, VF, CM_Scalarize, Cost); 6069 continue; 6070 } 6071 6072 // We assume that widening is the best solution when possible. 6073 if (memoryInstructionCanBeWidened(&I, VF)) { 6074 unsigned Cost = getConsecutiveMemOpCost(&I, VF); 6075 int ConsecutiveStride = 6076 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 6077 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6078 "Expected consecutive stride."); 6079 InstWidening Decision = 6080 ConsecutiveStride == 1 ? 
CM_Widen : CM_Widen_Reverse; 6081 setWideningDecision(&I, VF, Decision, Cost); 6082 continue; 6083 } 6084 6085 // Choose between Interleaving, Gather/Scatter or Scalarization. 6086 unsigned InterleaveCost = std::numeric_limits<unsigned>::max(); 6087 unsigned NumAccesses = 1; 6088 if (isAccessInterleaved(&I)) { 6089 auto Group = getInterleavedAccessGroup(&I); 6090 assert(Group && "Fail to get an interleaved access group."); 6091 6092 // Make one decision for the whole group. 6093 if (getWideningDecision(&I, VF) != CM_Unknown) 6094 continue; 6095 6096 NumAccesses = Group->getNumMembers(); 6097 if (interleavedAccessCanBeWidened(&I, VF)) 6098 InterleaveCost = getInterleaveGroupCost(&I, VF); 6099 } 6100 6101 unsigned GatherScatterCost = 6102 isLegalGatherOrScatter(&I) 6103 ? getGatherScatterCost(&I, VF) * NumAccesses 6104 : std::numeric_limits<unsigned>::max(); 6105 6106 unsigned ScalarizationCost = 6107 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6108 6109 // Choose better solution for the current VF, 6110 // write down this decision and use it during vectorization. 6111 unsigned Cost; 6112 InstWidening Decision; 6113 if (InterleaveCost <= GatherScatterCost && 6114 InterleaveCost < ScalarizationCost) { 6115 Decision = CM_Interleave; 6116 Cost = InterleaveCost; 6117 } else if (GatherScatterCost < ScalarizationCost) { 6118 Decision = CM_GatherScatter; 6119 Cost = GatherScatterCost; 6120 } else { 6121 Decision = CM_Scalarize; 6122 Cost = ScalarizationCost; 6123 } 6124 // If the instructions belongs to an interleave group, the whole group 6125 // receives the same decision. The whole group receives the cost, but 6126 // the cost will actually be assigned to one instruction. 6127 if (auto Group = getInterleavedAccessGroup(&I)) 6128 setWideningDecision(Group, VF, Decision, Cost); 6129 else 6130 setWideningDecision(&I, VF, Decision, Cost); 6131 } 6132 } 6133 6134 // Make sure that any load of address and any other address computation 6135 // remains scalar unless there is gather/scatter support. This avoids 6136 // inevitable extracts into address registers, and also has the benefit of 6137 // activating LSR more, since that pass can't optimize vectorized 6138 // addresses. 6139 if (TTI.prefersVectorizedAddressing()) 6140 return; 6141 6142 // Start with all scalar pointer uses. 6143 SmallPtrSet<Instruction *, 8> AddrDefs; 6144 for (BasicBlock *BB : TheLoop->blocks()) 6145 for (Instruction &I : *BB) { 6146 Instruction *PtrDef = 6147 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 6148 if (PtrDef && TheLoop->contains(PtrDef) && 6149 getWideningDecision(&I, VF) != CM_GatherScatter) 6150 AddrDefs.insert(PtrDef); 6151 } 6152 6153 // Add all instructions used to generate the addresses. 6154 SmallVector<Instruction *, 4> Worklist; 6155 for (auto *I : AddrDefs) 6156 Worklist.push_back(I); 6157 while (!Worklist.empty()) { 6158 Instruction *I = Worklist.pop_back_val(); 6159 for (auto &Op : I->operands()) 6160 if (auto *InstOp = dyn_cast<Instruction>(Op)) 6161 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 6162 AddrDefs.insert(InstOp).second) 6163 Worklist.push_back(InstOp); 6164 } 6165 6166 for (auto *I : AddrDefs) { 6167 if (isa<LoadInst>(I)) { 6168 // Setting the desired widening decision should ideally be handled in 6169 // by cost functions, but since this involves the task of finding out 6170 // if the loaded register is involved in an address computation, it is 6171 // instead changed here when we know this is the case. 
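// For example, a load whose result is only used to form the address of
// another load or store is re-marked CM_Scalarize here, at VF times its
// scalar memory cost, so the address computation stays in scalar registers
// instead of requiring extracts from a vector.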
6172 InstWidening Decision = getWideningDecision(I, VF); 6173 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 6174 // Scalarize a widened load of address. 6175 setWideningDecision(I, VF, CM_Scalarize, 6176 (VF * getMemoryInstructionCost(I, 1))); 6177 else if (auto Group = getInterleavedAccessGroup(I)) { 6178 // Scalarize an interleave group of address loads. 6179 for (unsigned I = 0; I < Group->getFactor(); ++I) { 6180 if (Instruction *Member = Group->getMember(I)) 6181 setWideningDecision(Member, VF, CM_Scalarize, 6182 (VF * getMemoryInstructionCost(Member, 1))); 6183 } 6184 } 6185 } else 6186 // Make sure I gets scalarized and a cost estimate without 6187 // scalarization overhead. 6188 ForcedScalars[VF].insert(I); 6189 } 6190 } 6191 6192 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6193 unsigned VF, 6194 Type *&VectorTy) { 6195 Type *RetTy = I->getType(); 6196 if (canTruncateToMinimalBitwidth(I, VF)) 6197 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6198 VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF); 6199 auto SE = PSE.getSE(); 6200 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6201 6202 // TODO: We need to estimate the cost of intrinsic calls. 6203 switch (I->getOpcode()) { 6204 case Instruction::GetElementPtr: 6205 // We mark this instruction as zero-cost because the cost of GEPs in 6206 // vectorized code depends on whether the corresponding memory instruction 6207 // is scalarized or not. Therefore, we handle GEPs with the memory 6208 // instruction cost. 6209 return 0; 6210 case Instruction::Br: { 6211 // In cases of scalarized and predicated instructions, there will be VF 6212 // predicated blocks in the vectorized loop. Each branch around these 6213 // blocks requires also an extract of its vector compare i1 element. 6214 bool ScalarPredicatedBB = false; 6215 BranchInst *BI = cast<BranchInst>(I); 6216 if (VF > 1 && BI->isConditional() && 6217 (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) != 6218 PredicatedBBsAfterVectorization.end() || 6219 PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) != 6220 PredicatedBBsAfterVectorization.end())) 6221 ScalarPredicatedBB = true; 6222 6223 if (ScalarPredicatedBB) { 6224 // Return cost for branches around scalarized and predicated blocks. 6225 VectorType *Vec_i1Ty = 6226 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 6227 return (TTI.getScalarizationOverhead(Vec_i1Ty, APInt::getAllOnesValue(VF), 6228 false, true) + 6229 (TTI.getCFInstrCost(Instruction::Br) * VF)); 6230 } else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1) 6231 // The back-edge branch will remain, as will all scalar branches. 6232 return TTI.getCFInstrCost(Instruction::Br); 6233 else 6234 // This branch will be eliminated by if-conversion. 6235 return 0; 6236 // Note: We currently assume zero cost for an unconditional branch inside 6237 // a predicated block since it will become a fall-through, although we 6238 // may decide in the future to call TTI for all branches. 6239 } 6240 case Instruction::PHI: { 6241 auto *Phi = cast<PHINode>(I); 6242 6243 // First-order recurrences are replaced by vector shuffles inside the loop. 6244 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 
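// Illustrative case: in a loop computing a[i] = b[i] + prev, where prev
// carries b[i-1] from the previous iteration through a phi, that phi is a
// first-order recurrence and its per-vector-iteration cost is modelled below
// as one extract-subvector shuffle.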
6245 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6246 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6247 cast<VectorType>(VectorTy), VF - 1, 6248 VectorType::get(RetTy, 1)); 6249 6250 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 6251 // converted into select instructions. We require N - 1 selects per phi 6252 // node, where N is the number of incoming values. 6253 if (VF > 1 && Phi->getParent() != TheLoop->getHeader()) 6254 return (Phi->getNumIncomingValues() - 1) * 6255 TTI.getCmpSelInstrCost( 6256 Instruction::Select, ToVectorTy(Phi->getType(), VF), 6257 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 6258 CostKind); 6259 6260 return TTI.getCFInstrCost(Instruction::PHI); 6261 } 6262 case Instruction::UDiv: 6263 case Instruction::SDiv: 6264 case Instruction::URem: 6265 case Instruction::SRem: 6266 // If we have a predicated instruction, it may not be executed for each 6267 // vector lane. Get the scalarization cost and scale this amount by the 6268 // probability of executing the predicated block. If the instruction is not 6269 // predicated, we fall through to the next case. 6270 if (VF > 1 && isScalarWithPredication(I)) { 6271 unsigned Cost = 0; 6272 6273 // These instructions have a non-void type, so account for the phi nodes 6274 // that we will create. This cost is likely to be zero. The phi node 6275 // cost, if any, should be scaled by the block probability because it 6276 // models a copy at the end of each predicated block. 6277 Cost += VF * TTI.getCFInstrCost(Instruction::PHI); 6278 6279 // The cost of the non-predicated instruction. 6280 Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 6281 6282 // The cost of insertelement and extractelement instructions needed for 6283 // scalarization. 6284 Cost += getScalarizationOverhead(I, VF); 6285 6286 // Scale the cost by the probability of executing the predicated blocks. 6287 // This assumes the predicated block for each vector lane is equally 6288 // likely. 6289 return Cost / getReciprocalPredBlockProb(); 6290 } 6291 LLVM_FALLTHROUGH; 6292 case Instruction::Add: 6293 case Instruction::FAdd: 6294 case Instruction::Sub: 6295 case Instruction::FSub: 6296 case Instruction::Mul: 6297 case Instruction::FMul: 6298 case Instruction::FDiv: 6299 case Instruction::FRem: 6300 case Instruction::Shl: 6301 case Instruction::LShr: 6302 case Instruction::AShr: 6303 case Instruction::And: 6304 case Instruction::Or: 6305 case Instruction::Xor: { 6306 // Since we will replace the stride by 1 the multiplication should go away. 6307 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 6308 return 0; 6309 // Certain instructions can be cheaper to vectorize if they have a constant 6310 // second vector operand. One example of this are shifts on x86. 6311 Value *Op2 = I->getOperand(1); 6312 TargetTransformInfo::OperandValueProperties Op2VP; 6313 TargetTransformInfo::OperandValueKind Op2VK = 6314 TTI.getOperandInfo(Op2, Op2VP); 6315 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 6316 Op2VK = TargetTransformInfo::OK_UniformValue; 6317 6318 SmallVector<const Value *, 4> Operands(I->operand_values()); 6319 unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1; 6320 return N * TTI.getArithmeticInstrCost( 6321 I->getOpcode(), VectorTy, CostKind, 6322 TargetTransformInfo::OK_AnyValue, 6323 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 6324 } 6325 case Instruction::FNeg: { 6326 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6327 return N * TTI.getArithmeticInstrCost( 6328 I->getOpcode(), VectorTy, CostKind, 6329 TargetTransformInfo::OK_AnyValue, 6330 TargetTransformInfo::OK_AnyValue, 6331 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None, 6332 I->getOperand(0), I); 6333 } 6334 case Instruction::Select: { 6335 SelectInst *SI = cast<SelectInst>(I); 6336 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6337 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6338 Type *CondTy = SI->getCondition()->getType(); 6339 if (!ScalarCond) 6340 CondTy = VectorType::get(CondTy, VF); 6341 6342 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 6343 CostKind, I); 6344 } 6345 case Instruction::ICmp: 6346 case Instruction::FCmp: { 6347 Type *ValTy = I->getOperand(0)->getType(); 6348 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6349 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 6350 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 6351 VectorTy = ToVectorTy(ValTy, VF); 6352 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, CostKind, 6353 I); 6354 } 6355 case Instruction::Store: 6356 case Instruction::Load: { 6357 unsigned Width = VF; 6358 if (Width > 1) { 6359 InstWidening Decision = getWideningDecision(I, Width); 6360 assert(Decision != CM_Unknown && 6361 "CM decision should be taken at this point"); 6362 if (Decision == CM_Scalarize) 6363 Width = 1; 6364 } 6365 VectorTy = ToVectorTy(getMemInstValueType(I), Width); 6366 return getMemoryInstructionCost(I, VF); 6367 } 6368 case Instruction::ZExt: 6369 case Instruction::SExt: 6370 case Instruction::FPToUI: 6371 case Instruction::FPToSI: 6372 case Instruction::FPExt: 6373 case Instruction::PtrToInt: 6374 case Instruction::IntToPtr: 6375 case Instruction::SIToFP: 6376 case Instruction::UIToFP: 6377 case Instruction::Trunc: 6378 case Instruction::FPTrunc: 6379 case Instruction::BitCast: { 6380 // We optimize the truncation of induction variables having constant 6381 // integer steps. The cost of these truncations is the same as the scalar 6382 // operation. 6383 if (isOptimizableIVTruncate(I, VF)) { 6384 auto *Trunc = cast<TruncInst>(I); 6385 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 6386 Trunc->getSrcTy(), CostKind, Trunc); 6387 } 6388 6389 Type *SrcScalarTy = I->getOperand(0)->getType(); 6390 Type *SrcVecTy = 6391 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 6392 if (canTruncateToMinimalBitwidth(I, VF)) { 6393 // This cast is going to be shrunk. This may remove the cast or it might 6394 // turn it into slightly different cast. For example, if MinBW == 16, 6395 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 6396 // 6397 // Calculate the modified src and dest types. 6398 Type *MinVecTy = VectorTy; 6399 if (I->getOpcode() == Instruction::Trunc) { 6400 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 6401 VectorTy = 6402 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6403 } else if (I->getOpcode() == Instruction::ZExt || 6404 I->getOpcode() == Instruction::SExt) { 6405 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 6406 VectorTy = 6407 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 6408 } 6409 } 6410 6411 unsigned N = isScalarAfterVectorization(I, VF) ? 
VF : 1; 6412 return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, 6413 CostKind, I); 6414 } 6415 case Instruction::Call: { 6416 bool NeedToScalarize; 6417 CallInst *CI = cast<CallInst>(I); 6418 unsigned CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 6419 if (getVectorIntrinsicIDForCall(CI, TLI)) 6420 return std::min(CallCost, getVectorIntrinsicCost(CI, VF)); 6421 return CallCost; 6422 } 6423 default: 6424 // The cost of executing VF copies of the scalar instruction. This opcode 6425 // is unknown. Assume that it is the same as 'mul'. 6426 return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, 6427 CostKind) + 6428 getScalarizationOverhead(I, VF); 6429 } // end of switch. 6430 } 6431 6432 char LoopVectorize::ID = 0; 6433 6434 static const char lv_name[] = "Loop Vectorization"; 6435 6436 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 6437 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 6438 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 6439 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 6440 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 6441 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 6442 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 6443 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 6444 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 6445 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 6446 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 6447 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 6448 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 6449 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 6450 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 6451 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 6452 6453 namespace llvm { 6454 6455 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 6456 6457 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 6458 bool VectorizeOnlyWhenForced) { 6459 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 6460 } 6461 6462 } // end namespace llvm 6463 6464 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 6465 // Check if the pointer operand of a load or store instruction is 6466 // consecutive. 6467 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 6468 return Legal->isConsecutivePtr(Ptr); 6469 return false; 6470 } 6471 6472 void LoopVectorizationCostModel::collectValuesToIgnore() { 6473 // Ignore ephemeral values. 6474 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 6475 6476 // Ignore type-promoting instructions we identified during reduction 6477 // detection. 6478 for (auto &Reduction : Legal->getReductionVars()) { 6479 RecurrenceDescriptor &RedDes = Reduction.second; 6480 SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 6481 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6482 } 6483 // Ignore type-casting instructions we identified during induction 6484 // detection. 6485 for (auto &Induction : Legal->getInductionVars()) { 6486 InductionDescriptor &IndDes = Induction.second; 6487 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6488 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 6489 } 6490 } 6491 6492 // TODO: we could return a pair of values that specify the max VF and 6493 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 6494 // `buildVPlans(VF, VF)`. 
We cannot do it because VPLAN at the moment 6495 // doesn't have a cost model that can choose which plan to execute if 6496 // more than one is generated. 6497 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 6498 LoopVectorizationCostModel &CM) { 6499 unsigned WidestType; 6500 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 6501 return WidestVectorRegBits / WidestType; 6502 } 6503 6504 VectorizationFactor 6505 LoopVectorizationPlanner::planInVPlanNativePath(unsigned UserVF) { 6506 unsigned VF = UserVF; 6507 // Outer loop handling: They may require CFG and instruction level 6508 // transformations before even evaluating whether vectorization is profitable. 6509 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 6510 // the vectorization pipeline. 6511 if (!OrigLoop->empty()) { 6512 // If the user doesn't provide a vectorization factor, determine a 6513 // reasonable one. 6514 if (!UserVF) { 6515 VF = determineVPlanVF(TTI->getRegisterBitWidth(true /* Vector*/), CM); 6516 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 6517 6518 // Make sure we have a VF > 1 for stress testing. 6519 if (VPlanBuildStressTest && VF < 2) { 6520 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 6521 << "overriding computed VF.\n"); 6522 VF = 4; 6523 } 6524 } 6525 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 6526 assert(isPowerOf2_32(VF) && "VF needs to be a power of two"); 6527 LLVM_DEBUG(dbgs() << "LV: Using " << (UserVF ? "user " : "") << "VF " << VF 6528 << " to build VPlans.\n"); 6529 buildVPlans(VF, VF); 6530 6531 // For VPlan build stress testing, we bail out after VPlan construction. 6532 if (VPlanBuildStressTest) 6533 return VectorizationFactor::Disabled(); 6534 6535 return {VF, 0}; 6536 } 6537 6538 LLVM_DEBUG( 6539 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 6540 "VPlan-native path.\n"); 6541 return VectorizationFactor::Disabled(); 6542 } 6543 6544 Optional<VectorizationFactor> LoopVectorizationPlanner::plan(unsigned UserVF) { 6545 assert(OrigLoop->empty() && "Inner loop expected."); 6546 Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(); 6547 if (!MaybeMaxVF) // Cases that should not to be vectorized nor interleaved. 6548 return None; 6549 6550 // Invalidate interleave groups if all blocks of loop will be predicated. 6551 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 6552 !useMaskedInterleavedAccesses(*TTI)) { 6553 LLVM_DEBUG( 6554 dbgs() 6555 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 6556 "which requires masked-interleaved support.\n"); 6557 if (CM.InterleaveInfo.invalidateGroups()) 6558 // Invalidating interleave groups also requires invalidating all decisions 6559 // based on them, which includes widening decisions and uniform and scalar 6560 // values. 6561 CM.invalidateCostModelingDecisions(); 6562 } 6563 6564 if (UserVF) { 6565 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6566 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6567 // Collect the instructions (and their associated costs) that will be more 6568 // profitable to scalarize. 
6569 CM.selectUserVectorizationFactor(UserVF); 6570 buildVPlansWithVPRecipes(UserVF, UserVF); 6571 LLVM_DEBUG(printPlans(dbgs())); 6572 return {{UserVF, 0}}; 6573 } 6574 6575 unsigned MaxVF = MaybeMaxVF.getValue(); 6576 assert(MaxVF != 0 && "MaxVF is zero."); 6577 6578 for (unsigned VF = 1; VF <= MaxVF; VF *= 2) { 6579 // Collect Uniform and Scalar instructions after vectorization with VF. 6580 CM.collectUniformsAndScalars(VF); 6581 6582 // Collect the instructions (and their associated costs) that will be more 6583 // profitable to scalarize. 6584 if (VF > 1) 6585 CM.collectInstsToScalarize(VF); 6586 } 6587 6588 buildVPlansWithVPRecipes(1, MaxVF); 6589 LLVM_DEBUG(printPlans(dbgs())); 6590 if (MaxVF == 1) 6591 return VectorizationFactor::Disabled(); 6592 6593 // Select the optimal vectorization factor. 6594 return CM.selectVectorizationFactor(MaxVF); 6595 } 6596 6597 void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) { 6598 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 6599 << '\n'); 6600 BestVF = VF; 6601 BestUF = UF; 6602 6603 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 6604 return !Plan->hasVF(VF); 6605 }); 6606 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 6607 } 6608 6609 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 6610 DominatorTree *DT) { 6611 // Perform the actual loop transformation. 6612 6613 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 6614 VPCallbackILV CallbackILV(ILV); 6615 6616 VPTransformState State{BestVF, BestUF, LI, 6617 DT, ILV.Builder, ILV.VectorLoopValueMap, 6618 &ILV, CallbackILV}; 6619 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 6620 State.TripCount = ILV.getOrCreateTripCount(nullptr); 6621 State.CanonicalIV = ILV.Induction; 6622 6623 //===------------------------------------------------===// 6624 // 6625 // Notice: any optimization or new instruction that go 6626 // into the code below should also be implemented in 6627 // the cost-model. 6628 // 6629 //===------------------------------------------------===// 6630 6631 // 2. Copy and widen instructions from the old loop into the new loop. 6632 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 6633 VPlans.front()->execute(&State); 6634 6635 // 3. Fix the vectorized code: take care of header phi's, live-outs, 6636 // predication, updating analyses. 6637 ILV.fixVectorizedLoop(); 6638 } 6639 6640 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 6641 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 6642 BasicBlock *Latch = OrigLoop->getLoopLatch(); 6643 6644 // We create new control-flow for the vectorized loop, so the original 6645 // condition will be dead after vectorization if it's only used by the 6646 // branch. 6647 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 6648 if (Cmp && Cmp->hasOneUse()) 6649 DeadInstructions.insert(Cmp); 6650 6651 // We create new "steps" for induction variable updates to which the original 6652 // induction variables map. An original update instruction will be dead if 6653 // all its users except the induction variable are dead. 
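// For instance, in a canonical loop `for (i = 0; i < n; ++i)` the update
// `i.next = i + 1` feeds only the induction phi and the latch compare; once
// the compare has been collected as dead above, the update itself is dead,
// because the vector loop drives its own widened induction steps.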
6654 for (auto &Induction : Legal->getInductionVars()) { 6655 PHINode *Ind = Induction.first; 6656 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 6657 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 6658 return U == Ind || DeadInstructions.find(cast<Instruction>(U)) != 6659 DeadInstructions.end(); 6660 })) 6661 DeadInstructions.insert(IndUpdate); 6662 6663 // We record as "Dead" also the type-casting instructions we had identified 6664 // during induction analysis. We don't need any handling for them in the 6665 // vectorized loop because we have proven that, under a proper runtime 6666 // test guarding the vectorized loop, the value of the phi, and the casted 6667 // value of the phi, are the same. The last instruction in this casting chain 6668 // will get its scalar/vector/widened def from the scalar/vector/widened def 6669 // of the respective phi node. Any other casts in the induction def-use chain 6670 // have no other uses outside the phi update chain, and will be ignored. 6671 InductionDescriptor &IndDes = Induction.second; 6672 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 6673 DeadInstructions.insert(Casts.begin(), Casts.end()); 6674 } 6675 } 6676 6677 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 6678 6679 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 6680 6681 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 6682 Instruction::BinaryOps BinOp) { 6683 // When unrolling and the VF is 1, we only need to add a simple scalar. 6684 Type *Ty = Val->getType(); 6685 assert(!Ty->isVectorTy() && "Val must be a scalar"); 6686 6687 if (Ty->isFloatingPointTy()) { 6688 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 6689 6690 // Floating point operations had to be 'fast' to enable the unrolling. 6691 Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step)); 6692 return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp)); 6693 } 6694 Constant *C = ConstantInt::get(Ty, StartIdx); 6695 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 6696 } 6697 6698 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 6699 SmallVector<Metadata *, 4> MDs; 6700 // Reserve first location for self reference to the LoopID metadata node. 6701 MDs.push_back(nullptr); 6702 bool IsUnrollMetadata = false; 6703 MDNode *LoopID = L->getLoopID(); 6704 if (LoopID) { 6705 // First find existing loop unrolling disable metadata. 6706 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 6707 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 6708 if (MD) { 6709 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 6710 IsUnrollMetadata = 6711 S && S->getString().startswith("llvm.loop.unroll.disable"); 6712 } 6713 MDs.push_back(LoopID->getOperand(i)); 6714 } 6715 } 6716 6717 if (!IsUnrollMetadata) { 6718 // Add runtime unroll disable metadata. 6719 LLVMContext &Context = L->getHeader()->getContext(); 6720 SmallVector<Metadata *, 1> DisableOperands; 6721 DisableOperands.push_back( 6722 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 6723 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 6724 MDs.push_back(DisableNode); 6725 MDNode *NewLoopID = MDNode::get(Context, MDs); 6726 // Set operand 0 to refer to the loop id itself. 
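// The resulting IR looks roughly like (illustrative):
//   br i1 %cond, label %exit, label %loop, !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}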
6727 NewLoopID->replaceOperandWith(0, NewLoopID); 6728 L->setLoopID(NewLoopID); 6729 } 6730 } 6731 6732 bool LoopVectorizationPlanner::getDecisionAndClampRange( 6733 const std::function<bool(unsigned)> &Predicate, VFRange &Range) { 6734 assert(Range.End > Range.Start && "Trying to test an empty VF range."); 6735 bool PredicateAtRangeStart = Predicate(Range.Start); 6736 6737 for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2) 6738 if (Predicate(TmpVF) != PredicateAtRangeStart) { 6739 Range.End = TmpVF; 6740 break; 6741 } 6742 6743 return PredicateAtRangeStart; 6744 } 6745 6746 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 6747 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 6748 /// of VF's starting at a given VF and extending it as much as possible. Each 6749 /// vectorization decision can potentially shorten this sub-range during 6750 /// buildVPlan(). 6751 void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) { 6752 for (unsigned VF = MinVF; VF < MaxVF + 1;) { 6753 VFRange SubRange = {VF, MaxVF + 1}; 6754 VPlans.push_back(buildVPlan(SubRange)); 6755 VF = SubRange.End; 6756 } 6757 } 6758 6759 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 6760 VPlanPtr &Plan) { 6761 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 6762 6763 // Look for cached value. 6764 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 6765 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 6766 if (ECEntryIt != EdgeMaskCache.end()) 6767 return ECEntryIt->second; 6768 6769 VPValue *SrcMask = createBlockInMask(Src, Plan); 6770 6771 // The terminator has to be a branch inst! 6772 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 6773 assert(BI && "Unexpected terminator found"); 6774 6775 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 6776 return EdgeMaskCache[Edge] = SrcMask; 6777 6778 VPValue *EdgeMask = Plan->getVPValue(BI->getCondition()); 6779 assert(EdgeMask && "No Edge Mask found for condition"); 6780 6781 if (BI->getSuccessor(0) != Dst) 6782 EdgeMask = Builder.createNot(EdgeMask); 6783 6784 if (SrcMask) // Otherwise block in-mask is all-one, no need to AND. 6785 EdgeMask = Builder.createAnd(EdgeMask, SrcMask); 6786 6787 return EdgeMaskCache[Edge] = EdgeMask; 6788 } 6789 6790 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 6791 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 6792 6793 // Look for cached value. 6794 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 6795 if (BCEntryIt != BlockMaskCache.end()) 6796 return BCEntryIt->second; 6797 6798 // All-one mask is modelled as no-mask following the convention for masked 6799 // load/store/gather/scatter. Initialize BlockMask to no-mask. 6800 VPValue *BlockMask = nullptr; 6801 6802 if (OrigLoop->getHeader() == BB) { 6803 if (!CM.blockNeedsPredication(BB)) 6804 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 6805 6806 // Introduce the early-exit compare IV <= BTC to form header block mask. 6807 // This is used instead of IV < TC because TC may wrap, unlike BTC. 6808 // Start by constructing the desired canonical IV. 
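// (Worked example, assuming a trip count of 10 and VF = 4: BTC = 9, and the
// final vector iteration has IV lanes {8, 9, 10, 11}, so the ICmpULE below
// produces the mask {1, 1, 0, 0} and the two tail lanes are masked off.)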
6809 VPValue *IV = nullptr; 6810 if (Legal->getPrimaryInduction()) 6811 IV = Plan->getVPValue(Legal->getPrimaryInduction()); 6812 else { 6813 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 6814 Builder.getInsertBlock()->appendRecipe(IVRecipe); 6815 IV = IVRecipe->getVPValue(); 6816 } 6817 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 6818 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 6819 return BlockMaskCache[BB] = BlockMask; 6820 } 6821 6822 // This is the block mask. We OR all incoming edges. 6823 for (auto *Predecessor : predecessors(BB)) { 6824 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 6825 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 6826 return BlockMaskCache[BB] = EdgeMask; 6827 6828 if (!BlockMask) { // BlockMask has its initialized nullptr value. 6829 BlockMask = EdgeMask; 6830 continue; 6831 } 6832 6833 BlockMask = Builder.createOr(BlockMask, EdgeMask); 6834 } 6835 6836 return BlockMaskCache[BB] = BlockMask; 6837 } 6838 6839 VPWidenMemoryInstructionRecipe * 6840 VPRecipeBuilder::tryToWidenMemory(Instruction *I, VFRange &Range, 6841 VPlanPtr &Plan) { 6842 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 6843 "Must be called with either a load or store"); 6844 6845 auto willWiden = [&](unsigned VF) -> bool { 6846 if (VF == 1) 6847 return false; 6848 LoopVectorizationCostModel::InstWidening Decision = 6849 CM.getWideningDecision(I, VF); 6850 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 6851 "CM decision should be taken at this point."); 6852 if (Decision == LoopVectorizationCostModel::CM_Interleave) 6853 return true; 6854 if (CM.isScalarAfterVectorization(I, VF) || 6855 CM.isProfitableToScalarize(I, VF)) 6856 return false; 6857 return Decision != LoopVectorizationCostModel::CM_Scalarize; 6858 }; 6859 6860 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6861 return nullptr; 6862 6863 VPValue *Mask = nullptr; 6864 if (Legal->isMaskRequired(I)) 6865 Mask = createBlockInMask(I->getParent(), Plan); 6866 6867 VPValue *Addr = Plan->getOrAddVPValue(getLoadStorePointerOperand(I)); 6868 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 6869 return new VPWidenMemoryInstructionRecipe(*Load, Addr, Mask); 6870 6871 StoreInst *Store = cast<StoreInst>(I); 6872 VPValue *StoredValue = Plan->getOrAddVPValue(Store->getValueOperand()); 6873 return new VPWidenMemoryInstructionRecipe(*Store, Addr, StoredValue, Mask); 6874 } 6875 6876 VPWidenIntOrFpInductionRecipe * 6877 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi) const { 6878 // Check if this is an integer or fp induction. If so, build the recipe that 6879 // produces its scalar and vector values. 6880 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 6881 if (II.getKind() == InductionDescriptor::IK_IntInduction || 6882 II.getKind() == InductionDescriptor::IK_FpInduction) 6883 return new VPWidenIntOrFpInductionRecipe(Phi); 6884 6885 return nullptr; 6886 } 6887 6888 VPWidenIntOrFpInductionRecipe * 6889 VPRecipeBuilder::tryToOptimizeInductionTruncate(TruncInst *I, 6890 VFRange &Range) const { 6891 // Optimize the special case where the source is a constant integer 6892 // induction variable. Notice that we can only optimize the 'trunc' case 6893 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 6894 // (c) other casts depend on pointer size. 6895 6896 // Determine whether \p K is a truncation based on an induction variable that 6897 // can be optimized. 
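// (For example, `%t = trunc i64 %iv to i32`, where %iv is an integer
// induction with a constant step, can be handled by generating a 32-bit
// widened induction directly instead of widening %iv and emitting a vector
// truncate.)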
6898 auto isOptimizableIVTruncate = 6899 [&](Instruction *K) -> std::function<bool(unsigned)> { 6900 return 6901 [=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); }; 6902 }; 6903 6904 if (LoopVectorizationPlanner::getDecisionAndClampRange( 6905 isOptimizableIVTruncate(I), Range)) 6906 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 6907 I); 6908 return nullptr; 6909 } 6910 6911 VPBlendRecipe *VPRecipeBuilder::tryToBlend(PHINode *Phi, VPlanPtr &Plan) { 6912 // We know that all PHIs in non-header blocks are converted into selects, so 6913 // we don't have to worry about the insertion order and we can just use the 6914 // builder. At this point we generate the predication tree. There may be 6915 // duplications since this is a simple recursive scan, but future 6916 // optimizations will clean it up. 6917 6918 SmallVector<VPValue *, 2> Operands; 6919 unsigned NumIncoming = Phi->getNumIncomingValues(); 6920 for (unsigned In = 0; In < NumIncoming; In++) { 6921 VPValue *EdgeMask = 6922 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 6923 assert((EdgeMask || NumIncoming == 1) && 6924 "Multiple predecessors with one having a full mask"); 6925 Operands.push_back(Plan->getOrAddVPValue(Phi->getIncomingValue(In))); 6926 if (EdgeMask) 6927 Operands.push_back(EdgeMask); 6928 } 6929 return new VPBlendRecipe(Phi, Operands); 6930 } 6931 6932 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, VFRange &Range, 6933 VPlan &Plan) const { 6934 6935 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 6936 [this, CI](unsigned VF) { return CM.isScalarWithPredication(CI, VF); }, 6937 Range); 6938 6939 if (IsPredicated) 6940 return nullptr; 6941 6942 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6943 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 6944 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect)) 6945 return nullptr; 6946 6947 auto willWiden = [&](unsigned VF) -> bool { 6948 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6949 // The following case may be scalarized depending on the VF. 6950 // The flag shows whether we use Intrinsic or a usual Call for vectorized 6951 // version of the instruction. 6952 // Is it beneficial to perform intrinsic call compared to lib call? 6953 bool NeedToScalarize = false; 6954 unsigned CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 6955 bool UseVectorIntrinsic = 6956 ID && CM.getVectorIntrinsicCost(CI, VF) <= CallCost; 6957 return UseVectorIntrinsic || !NeedToScalarize; 6958 }; 6959 6960 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 6961 return nullptr; 6962 6963 return new VPWidenCallRecipe(*CI, Plan.mapToVPValues(CI->arg_operands())); 6964 } 6965 6966 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 6967 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 6968 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 6969 // Instruction should be widened, unless it is scalar after vectorization, 6970 // scalarization is profitable or it is predicated. 
6971 auto WillScalarize = [this, I](unsigned VF) -> bool { 6972 return CM.isScalarAfterVectorization(I, VF) || 6973 CM.isProfitableToScalarize(I, VF) || 6974 CM.isScalarWithPredication(I, VF); 6975 }; 6976 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 6977 Range); 6978 } 6979 6980 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, VPlan &Plan) const { 6981 auto IsVectorizableOpcode = [](unsigned Opcode) { 6982 switch (Opcode) { 6983 case Instruction::Add: 6984 case Instruction::And: 6985 case Instruction::AShr: 6986 case Instruction::BitCast: 6987 case Instruction::FAdd: 6988 case Instruction::FCmp: 6989 case Instruction::FDiv: 6990 case Instruction::FMul: 6991 case Instruction::FNeg: 6992 case Instruction::FPExt: 6993 case Instruction::FPToSI: 6994 case Instruction::FPToUI: 6995 case Instruction::FPTrunc: 6996 case Instruction::FRem: 6997 case Instruction::FSub: 6998 case Instruction::ICmp: 6999 case Instruction::IntToPtr: 7000 case Instruction::LShr: 7001 case Instruction::Mul: 7002 case Instruction::Or: 7003 case Instruction::PtrToInt: 7004 case Instruction::SDiv: 7005 case Instruction::Select: 7006 case Instruction::SExt: 7007 case Instruction::Shl: 7008 case Instruction::SIToFP: 7009 case Instruction::SRem: 7010 case Instruction::Sub: 7011 case Instruction::Trunc: 7012 case Instruction::UDiv: 7013 case Instruction::UIToFP: 7014 case Instruction::URem: 7015 case Instruction::Xor: 7016 case Instruction::ZExt: 7017 return true; 7018 } 7019 return false; 7020 }; 7021 7022 if (!IsVectorizableOpcode(I->getOpcode())) 7023 return nullptr; 7024 7025 // Success: widen this instruction. 7026 return new VPWidenRecipe(*I, Plan.mapToVPValues(I->operands())); 7027 } 7028 7029 VPBasicBlock *VPRecipeBuilder::handleReplication( 7030 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 7031 DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe, 7032 VPlanPtr &Plan) { 7033 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 7034 [&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); }, 7035 Range); 7036 7037 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 7038 [&](unsigned VF) { return CM.isScalarWithPredication(I, VF); }, Range); 7039 7040 auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated); 7041 setRecipe(I, Recipe); 7042 7043 // Find if I uses a predicated instruction. If so, it will use its scalar 7044 // value. Avoid hoisting the insert-element which packs the scalar value into 7045 // a vector value, as that happens iff all users use the vector value. 7046 for (auto &Op : I->operands()) 7047 if (auto *PredInst = dyn_cast<Instruction>(Op)) 7048 if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end()) 7049 PredInst2Recipe[PredInst]->setAlsoPack(false); 7050 7051 // Finalize the recipe for Instr, first if it is not predicated. 7052 if (!IsPredicated) { 7053 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 7054 VPBB->appendRecipe(Recipe); 7055 return VPBB; 7056 } 7057 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 7058 assert(VPBB->getSuccessors().empty() && 7059 "VPBB has successors when handling predicated replication."); 7060 // Record predicated instructions for above packing optimizations. 
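// (Note: the replicated recipe is then wrapped below in a triangular if-then
// region built by createReplicateRegion, roughly of the shape
//   pred.<opcode>.entry -> pred.<opcode>.if -> pred.<opcode>.continue
// with a direct entry->continue edge for masked-off lanes.)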
7061 PredInst2Recipe[I] = Recipe;
7062 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
7063 VPBlockUtils::insertBlockAfter(Region, VPBB);
7064 auto *RegSucc = new VPBasicBlock();
7065 VPBlockUtils::insertBlockAfter(RegSucc, Region);
7066 return RegSucc;
7067 }
7068
7069 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
7070 VPRecipeBase *PredRecipe,
7071 VPlanPtr &Plan) {
7072 // Instructions marked for predication are replicated and placed under an
7073 // if-then construct to prevent side-effects.
7074
7075 // Generate recipes to compute the block mask for this region.
7076 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
7077
7078 // Build the triangular if-then region.
7079 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
7080 assert(Instr->getParent() && "Predicated instruction not in any basic block");
7081 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
7082 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
7083 auto *PHIRecipe =
7084 Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
7085 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
7086 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
7087 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
7088
7089 // Note: first set Entry as region entry and then connect successors starting
7090 // from it in order, to propagate the "parent" of each VPBasicBlock.
7091 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
7092 VPBlockUtils::connectBlocks(Pred, Exit);
7093
7094 return Region;
7095 }
7096
7097 VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
7098 VFRange &Range,
7099 VPlanPtr &Plan) {
7100 // First, check for specific widening recipes that deal with calls, memory
7101 // operations, inductions and Phi nodes.
7102 if (auto *CI = dyn_cast<CallInst>(Instr))
7103 return tryToWidenCall(CI, Range, *Plan);
7104
7105 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
7106 return tryToWidenMemory(Instr, Range, Plan);
7107
7108 VPRecipeBase *Recipe;
7109 if (auto Phi = dyn_cast<PHINode>(Instr)) {
7110 if (Phi->getParent() != OrigLoop->getHeader())
7111 return tryToBlend(Phi, Plan);
7112 if ((Recipe = tryToOptimizeInductionPHI(Phi)))
7113 return Recipe;
7114 return new VPWidenPHIRecipe(Phi);
7116 }
7117
7118 if (isa<TruncInst>(Instr) &&
7119 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Range)))
7120 return Recipe;
7121
7122 if (!shouldWiden(Instr, Range))
7123 return nullptr;
7124
7125 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
7126 return new VPWidenGEPRecipe(GEP, OrigLoop);
7127
7128 if (auto *SI = dyn_cast<SelectInst>(Instr)) {
7129 bool InvariantCond =
7130 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
7131 return new VPWidenSelectRecipe(*SI, InvariantCond);
7132 }
7133
7134 return tryToWiden(Instr, *Plan);
7135 }
7136
7137 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(unsigned MinVF,
7138 unsigned MaxVF) {
7139 assert(OrigLoop->empty() && "Inner loop expected.");
7140
7141 // Collect conditions feeding internal conditional branches; they need to be
7142 // represented in VPlan for it to model masking.
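// For example, for a loop body containing "if (a[i] > 42) b[i] = x;", the
// compare feeding the internal branch needs a VPValue so the edge and block
// masks built for the flattened body can be expressed in terms of it.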
7143 SmallPtrSet<Value *, 1> NeedDef;
7144
7145 auto *Latch = OrigLoop->getLoopLatch();
7146 for (BasicBlock *BB : OrigLoop->blocks()) {
7147 if (BB == Latch)
7148 continue;
7149 BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
7150 if (Branch && Branch->isConditional())
7151 NeedDef.insert(Branch->getCondition());
7152 }
7153
7154 // If the tail is to be folded by masking, the primary induction variable, if
7155 // it exists, needs to be represented in VPlan for it to model early-exit
7156 // masking. Also, both the Phi and the live-out instruction of each reduction
7157 // are required in order to introduce a select between them in VPlan.
7158 if (CM.foldTailByMasking()) {
7159 if (Legal->getPrimaryInduction())
7160 NeedDef.insert(Legal->getPrimaryInduction());
7161 for (auto &Reduction : Legal->getReductionVars()) {
7162 NeedDef.insert(Reduction.first);
7163 NeedDef.insert(Reduction.second.getLoopExitInstr());
7164 }
7165 }
7166
7167 // Collect instructions from the original loop that will become trivially dead
7168 // in the vectorized loop. We don't need to vectorize these instructions. For
7169 // example, original induction update instructions can become dead because we
7170 // separately emit induction "steps" when generating code for the new loop.
7171 // Similarly, we create a new latch condition when setting up the structure
7172 // of the new loop, so the old one can become dead.
7173 SmallPtrSet<Instruction *, 4> DeadInstructions;
7174 collectTriviallyDeadInstructions(DeadInstructions);
7175
7176 // Add assume instructions we need to drop to DeadInstructions, to prevent
7177 // them from being added to the VPlan.
7178 // TODO: We only need to drop assumes in blocks that get flattened. If the
7179 // control flow is preserved, we should keep them.
7180 auto &ConditionalAssumes = Legal->getConditionalAssumes();
7181 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
7182
7183 DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
7184 // Dead instructions do not need sinking. Remove them from SinkAfter.
7185 for (Instruction *I : DeadInstructions)
7186 SinkAfter.erase(I);
7187
7188 for (unsigned VF = MinVF; VF < MaxVF + 1;) {
7189 VFRange SubRange = {VF, MaxVF + 1};
7190 VPlans.push_back(buildVPlanWithVPRecipes(SubRange, NeedDef,
7191 DeadInstructions, SinkAfter));
7192 VF = SubRange.End;
7193 }
7194 }
7195
7196 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
7197 VFRange &Range, SmallPtrSetImpl<Value *> &NeedDef,
7198 SmallPtrSetImpl<Instruction *> &DeadInstructions,
7199 const DenseMap<Instruction *, Instruction *> &SinkAfter) {
7200
7201 // Hold a mapping from predicated instructions to their recipes, in order to
7202 // fix their AlsoPack behavior if a user is determined to replicate and use a
7203 // scalar instead of a vector value.
7204 DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
7205
7206 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
7207
7208 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
7209
7210 // ---------------------------------------------------------------------------
7211 // Pre-construction: record ingredients whose recipes we'll need to further
7212 // process after constructing the initial VPlan.
7213 // ---------------------------------------------------------------------------
7214
7215 // Mark instructions we'll need to sink later and their targets as
7216 // ingredients whose recipe we'll need to record.
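// (SinkAfter maps an instruction to the instruction it must be moved after,
// e.g. to legalize first-order recurrences; both ends of each pair need a
// recorded recipe so the move can be replayed on the VPlan further below.)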
7217 for (auto &Entry : SinkAfter) {
7218 RecipeBuilder.recordRecipeOf(Entry.first);
7219 RecipeBuilder.recordRecipeOf(Entry.second);
7220 }
7221
7222 // For each interleave group which is relevant for this (possibly trimmed)
7223 // Range, add it to the set of groups to be later applied to the VPlan and add
7224 // placeholders for its members' Recipes which we'll be replacing with a
7225 // single VPInterleaveRecipe.
7226 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
7227 auto applyIG = [IG, this](unsigned VF) -> bool {
7228 return (VF >= 2 && // Query is illegal for VF == 1
7229 CM.getWideningDecision(IG->getInsertPos(), VF) ==
7230 LoopVectorizationCostModel::CM_Interleave);
7231 };
7232 if (!getDecisionAndClampRange(applyIG, Range))
7233 continue;
7234 InterleaveGroups.insert(IG);
7235 for (unsigned i = 0; i < IG->getFactor(); i++)
7236 if (Instruction *Member = IG->getMember(i))
7237 RecipeBuilder.recordRecipeOf(Member);
7238 }
7239
7240 // ---------------------------------------------------------------------------
7241 // Build initial VPlan: Scan the body of the loop in a topological order to
7242 // visit each basic block after having visited its predecessor basic blocks.
7243 // ---------------------------------------------------------------------------
7244
7245 // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
7246 auto Plan = std::make_unique<VPlan>();
7247 VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
7248 Plan->setEntry(VPBB);
7249
7250 // Represent values that will have defs inside VPlan.
7251 for (Value *V : NeedDef)
7252 Plan->addVPValue(V);
7253
7254 // Scan the body of the loop in a topological order to visit each basic block
7255 // after having visited its predecessor basic blocks.
7256 LoopBlocksDFS DFS(OrigLoop);
7257 DFS.perform(LI);
7258
7259 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
7260 // Relevant instructions from basic block BB will be grouped into VPRecipe
7261 // ingredients and fill a new VPBasicBlock.
7262 unsigned VPBBsForBB = 0;
7263 auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
7264 VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
7265 VPBB = FirstVPBBForBB;
7266 Builder.setInsertPoint(VPBB);
7267
7268 // Introduce each ingredient into VPlan.
7269 // TODO: Model and preserve debug intrinsics in VPlan.
7270 for (Instruction &I : BB->instructionsWithoutDebug()) {
7271 Instruction *Instr = &I;
7272
7273 // First filter out irrelevant instructions, to ensure no recipes are
7274 // built for them.
7275 if (isa<BranchInst>(Instr) ||
7276 DeadInstructions.find(Instr) != DeadInstructions.end())
7277 continue;
7278
7279 if (auto Recipe =
7280 RecipeBuilder.tryToCreateWidenRecipe(Instr, Range, Plan)) {
7281 RecipeBuilder.setRecipe(Instr, Recipe);
7282 VPBB->appendRecipe(Recipe);
7283 continue;
7284 }
7285
7286 // Otherwise, if all widening options failed, the instruction is to be
7287 // replicated. This may create a successor for VPBB.
7288 VPBasicBlock *NextVPBB = RecipeBuilder.handleReplication(
7289 Instr, Range, VPBB, PredInst2Recipe, Plan);
7290 if (NextVPBB != VPBB) {
7291 VPBB = NextVPBB;
7292 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
7293 : "");
7294 }
7295 }
7296 }
7297
7298 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
7299 // may also be empty, such as the last one, VPBB, reflecting original
7300 // basic-blocks with no recipes.
7301 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 7302 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 7303 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 7304 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 7305 delete PreEntry; 7306 7307 // --------------------------------------------------------------------------- 7308 // Transform initial VPlan: Apply previously taken decisions, in order, to 7309 // bring the VPlan to its final state. 7310 // --------------------------------------------------------------------------- 7311 7312 // Apply Sink-After legal constraints. 7313 for (auto &Entry : SinkAfter) { 7314 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 7315 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 7316 Sink->moveAfter(Target); 7317 } 7318 7319 // Interleave memory: for each Interleave Group we marked earlier as relevant 7320 // for this VPlan, replace the Recipes widening its memory instructions with a 7321 // single VPInterleaveRecipe at its insertion point. 7322 for (auto IG : InterleaveGroups) { 7323 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 7324 RecipeBuilder.getRecipe(IG->getInsertPos())); 7325 (new VPInterleaveRecipe(IG, Recipe->getAddr(), Recipe->getMask())) 7326 ->insertBefore(Recipe); 7327 7328 for (unsigned i = 0; i < IG->getFactor(); ++i) 7329 if (Instruction *Member = IG->getMember(i)) { 7330 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 7331 } 7332 } 7333 7334 // Finally, if tail is folded by masking, introduce selects between the phi 7335 // and the live-out instruction of each reduction, at the end of the latch. 7336 if (CM.foldTailByMasking()) { 7337 Builder.setInsertPoint(VPBB); 7338 auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 7339 for (auto &Reduction : Legal->getReductionVars()) { 7340 VPValue *Phi = Plan->getVPValue(Reduction.first); 7341 VPValue *Red = Plan->getVPValue(Reduction.second.getLoopExitInstr()); 7342 Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi}); 7343 } 7344 } 7345 7346 std::string PlanName; 7347 raw_string_ostream RSO(PlanName); 7348 unsigned VF = Range.Start; 7349 Plan->addVF(VF); 7350 RSO << "Initial VPlan for VF={" << VF; 7351 for (VF *= 2; VF < Range.End; VF *= 2) { 7352 Plan->addVF(VF); 7353 RSO << "," << VF; 7354 } 7355 RSO << "},UF>=1"; 7356 RSO.flush(); 7357 Plan->setName(PlanName); 7358 7359 return Plan; 7360 } 7361 7362 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 7363 // Outer loop handling: They may require CFG and instruction level 7364 // transformations before even evaluating whether vectorization is profitable. 7365 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7366 // the vectorization pipeline. 7367 assert(!OrigLoop->empty()); 7368 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7369 7370 // Create new empty VPlan 7371 auto Plan = std::make_unique<VPlan>(); 7372 7373 // Build hierarchical CFG 7374 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 7375 HCFGBuilder.buildHierarchicalCFG(); 7376 7377 for (unsigned VF = Range.Start; VF < Range.End; VF *= 2) 7378 Plan->addVF(VF); 7379 7380 if (EnableVPlanPredication) { 7381 VPlanPredicator VPP(*Plan); 7382 VPP.predicate(); 7383 7384 // Avoid running transformation to recipes until masked code generation in 7385 // VPlan-native path is in place. 
7386 return Plan; 7387 } 7388 7389 SmallPtrSet<Instruction *, 1> DeadInstructions; 7390 VPlanTransforms::VPInstructionsToVPRecipes( 7391 OrigLoop, Plan, Legal->getInductionVars(), DeadInstructions); 7392 return Plan; 7393 } 7394 7395 Value* LoopVectorizationPlanner::VPCallbackILV:: 7396 getOrCreateVectorValues(Value *V, unsigned Part) { 7397 return ILV.getOrCreateVectorValue(V, Part); 7398 } 7399 7400 Value *LoopVectorizationPlanner::VPCallbackILV::getOrCreateScalarValue( 7401 Value *V, const VPIteration &Instance) { 7402 return ILV.getOrCreateScalarValue(V, Instance); 7403 } 7404 7405 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 7406 VPSlotTracker &SlotTracker) const { 7407 O << " +\n" 7408 << Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 7409 IG->getInsertPos()->printAsOperand(O, false); 7410 O << ", "; 7411 getAddr()->printAsOperand(O, SlotTracker); 7412 VPValue *Mask = getMask(); 7413 if (Mask) { 7414 O << ", "; 7415 Mask->printAsOperand(O, SlotTracker); 7416 } 7417 O << "\\l\""; 7418 for (unsigned i = 0; i < IG->getFactor(); ++i) 7419 if (Instruction *I = IG->getMember(i)) 7420 O << " +\n" 7421 << Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\""; 7422 } 7423 7424 void VPWidenCallRecipe::execute(VPTransformState &State) { 7425 State.ILV->widenCallInstruction(Ingredient, User, State); 7426 } 7427 7428 void VPWidenSelectRecipe::execute(VPTransformState &State) { 7429 State.ILV->widenSelectInstruction(Ingredient, InvariantCond); 7430 } 7431 7432 void VPWidenRecipe::execute(VPTransformState &State) { 7433 State.ILV->widenInstruction(Ingredient, User, State); 7434 } 7435 7436 void VPWidenGEPRecipe::execute(VPTransformState &State) { 7437 State.ILV->widenGEP(GEP, State.UF, State.VF, IsPtrLoopInvariant, 7438 IsIndexLoopInvariant); 7439 } 7440 7441 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 7442 assert(!State.Instance && "Int or FP induction being replicated."); 7443 State.ILV->widenIntOrFpInduction(IV, Trunc); 7444 } 7445 7446 void VPWidenPHIRecipe::execute(VPTransformState &State) { 7447 State.ILV->widenPHIInstruction(Phi, State.UF, State.VF); 7448 } 7449 7450 void VPBlendRecipe::execute(VPTransformState &State) { 7451 State.ILV->setDebugLocFromInst(State.Builder, Phi); 7452 // We know that all PHIs in non-header blocks are converted into 7453 // selects, so we don't have to worry about the insertion order and we 7454 // can just use the builder. 7455 // At this point we generate the predication tree. There may be 7456 // duplications since this is a simple recursive scan, but future 7457 // optimizations will clean it up. 7458 7459 unsigned NumIncoming = getNumIncomingValues(); 7460 7461 // Generate a sequence of selects of the form: 7462 // SELECT(Mask3, In3, 7463 // SELECT(Mask2, In2, 7464 // SELECT(Mask1, In1, 7465 // In0))) 7466 // Note that Mask0 is never used: lanes for which no path reaches this phi and 7467 // are essentially undef are taken from In0. 7468 InnerLoopVectorizer::VectorParts Entry(State.UF); 7469 for (unsigned In = 0; In < NumIncoming; ++In) { 7470 for (unsigned Part = 0; Part < State.UF; ++Part) { 7471 // We might have single edge PHIs (blocks) - use an identity 7472 // 'select' for the first PHI operand. 7473 Value *In0 = State.get(getIncomingValue(In), Part); 7474 if (In == 0) 7475 Entry[Part] = In0; // Initialize with the first incoming value. 7476 else { 7477 // Select between the current value and the previous incoming edge 7478 // based on the incoming mask. 
7479 Value *Cond = State.get(getMask(In), Part);
7480 Entry[Part] =
7481 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
7482 }
7483 }
7484 }
7485 for (unsigned Part = 0; Part < State.UF; ++Part)
7486 State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
7487 }
7488
7489 void VPInterleaveRecipe::execute(VPTransformState &State) {
7490 assert(!State.Instance && "Interleave group being replicated.");
7491 State.ILV->vectorizeInterleaveGroup(IG, State, getAddr(), getMask());
7492 }
7493
7494 void VPReplicateRecipe::execute(VPTransformState &State) {
7495 if (State.Instance) { // Generate a single instance.
7496 State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
7497 // Insert scalar instance packing it into a vector.
7498 if (AlsoPack && State.VF > 1) {
7499 // If we're constructing lane 0, initialize to start from undef.
7500 if (State.Instance->Lane == 0) {
7501 Value *Undef =
7502 UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
7503 State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
7504 }
7505 State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
7506 }
7507 return;
7508 }
7509
7510 // Generate scalar instances for all VF lanes of all UF parts, unless the
7511 // instruction is uniform, in which case generate only the first lane for each
7512 // of the UF parts.
7513 unsigned EndLane = IsUniform ? 1 : State.VF;
7514 for (unsigned Part = 0; Part < State.UF; ++Part)
7515 for (unsigned Lane = 0; Lane < EndLane; ++Lane)
7516 State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
7517 }
7518
7519 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
7520 assert(State.Instance && "Branch on Mask works only on single instance.");
7521
7522 unsigned Part = State.Instance->Part;
7523 unsigned Lane = State.Instance->Lane;
7524
7525 Value *ConditionBit = nullptr;
7526 if (!User) // Block in mask is all-one.
7527 ConditionBit = State.Builder.getTrue();
7528 else {
7529 VPValue *BlockInMask = User->getOperand(0);
7530 ConditionBit = State.get(BlockInMask, Part);
7531 if (ConditionBit->getType()->isVectorTy())
7532 ConditionBit = State.Builder.CreateExtractElement(
7533 ConditionBit, State.Builder.getInt32(Lane));
7534 }
7535
7536 // Replace the temporary unreachable terminator with a new conditional branch,
7537 // whose two destinations will be set later when they are created.
7538 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
7539 assert(isa<UnreachableInst>(CurrentTerminator) &&
7540 "Expected to replace unreachable terminator with conditional branch.");
7541 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
7542 CondBr->setSuccessor(0, nullptr);
7543 ReplaceInstWithInst(CurrentTerminator, CondBr);
7544 }
7545
7546 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
7547 assert(State.Instance && "Predicated instruction PHI works per instance.");
7548 Instruction *ScalarPredInst = cast<Instruction>(
7549 State.ValueMap.getScalarValue(PredInst, *State.Instance));
7550 BasicBlock *PredicatedBB = ScalarPredInst->getParent();
7551 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
7552 assert(PredicatingBB && "Predicated block has no single predecessor.");
7553
7554 // By current pack/unpack logic we need to generate only a single phi node: if
7555 // a vector value for the predicated instruction exists at this point it means
7556 // the instruction has vector users only, and a phi for the vector value is
7557 // needed.
In this case the recipe of the predicated instruction is marked to
7558 // also do that packing, thereby "hoisting" the insert-element sequence.
7559 // Otherwise, a phi node for the scalar value is needed.
7560 unsigned Part = State.Instance->Part;
7561 if (State.ValueMap.hasVectorValue(PredInst, Part)) {
7562 Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
7563 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
7564 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
7565 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
7566 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
7567 State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
7568 } else {
7569 Type *PredInstType = PredInst->getType();
7570 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
7571 Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
7572 Phi->addIncoming(ScalarPredInst, PredicatedBB);
7573 State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
7574 }
7575 }
7576
7577 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
7578 VPValue *StoredValue = isa<StoreInst>(Instr) ? getStoredValue() : nullptr;
7579 State.ILV->vectorizeMemoryInstruction(&Instr, State, getAddr(), StoredValue,
7580 getMask());
7581 }
7582
7583 // Determine how to lower the scalar epilogue, which depends on 1) optimising
7584 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
7585 // predication, and 4) a TTI hook that analyses whether the loop is suitable
7586 // for predication.
7587 static ScalarEpilogueLowering getScalarEpilogueLowering(
7588 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
7589 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
7590 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
7591 LoopVectorizationLegality &LVL) {
7592 bool OptSize =
7593 F->hasOptSize() || llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
7594 PGSOQueryType::IRPass);
7595 // 1) OptSize takes precedence over all other options, i.e. if this is set,
7596 // don't look at hints or options, and don't request a scalar epilogue.
7597 if (OptSize && Hints.getForce() != LoopVectorizeHints::FK_Enabled)
7598 return CM_ScalarEpilogueNotAllowedOptSize;
7599
7600 bool PredicateOptDisabled = PreferPredicateOverEpilog.getNumOccurrences() &&
7601 !PreferPredicateOverEpilog;
7602
7603 // 2) Next, if disabling predication is requested on the command line, honour
7604 // this and request a scalar epilogue.
7605 if (PredicateOptDisabled)
7606 return CM_ScalarEpilogueAllowed;
7607
7608 // 3) and 4) Look if enabling predication is requested on the command line or
7609 // with a loop hint, or if the TTI hook indicates this is profitable; if so,
7610 // request predication.
7611 if (PreferPredicateOverEpilog ||
7612 Hints.getPredicate() == LoopVectorizeHints::FK_Enabled ||
7613 (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
7614 LVL.getLAI()) &&
7615 Hints.getPredicate() != LoopVectorizeHints::FK_Disabled))
7616 return CM_ScalarEpilogueNotNeededUsePredicate;
7617
7618 return CM_ScalarEpilogueAllowed;
7619 }
7620
7621 // Process the loop in the VPlan-native vectorization path. This path builds
7622 // VPlan upfront in the vectorization pipeline, which allows applying
7623 // VPlan-to-VPlan transformations from the very beginning without modifying the
7624 // input LLVM IR.
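// Returns true if the loop was vectorized (and marked as such), and false if
// the VPlan-native path bailed out without modifying the loop.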
7625 static bool processLoopInVPlanNativePath( 7626 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 7627 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 7628 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 7629 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 7630 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints) { 7631 7632 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 7633 Function *F = L->getHeader()->getParent(); 7634 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 7635 7636 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 7637 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 7638 7639 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 7640 &Hints, IAI); 7641 // Use the planner for outer loop vectorization. 7642 // TODO: CM is not used at this point inside the planner. Turn CM into an 7643 // optional argument if we don't need it in the future. 7644 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE); 7645 7646 // Get user vectorization factor. 7647 const unsigned UserVF = Hints.getWidth(); 7648 7649 // Plan how to best vectorize, return the best VF and its cost. 7650 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 7651 7652 // If we are stress testing VPlan builds, do not attempt to generate vector 7653 // code. Masked vector code generation support will follow soon. 7654 // Also, do not attempt to vectorize if no vector code will be produced. 7655 if (VPlanBuildStressTest || EnableVPlanPredication || 7656 VectorizationFactor::Disabled() == VF) 7657 return false; 7658 7659 LVP.setBestPlan(VF.Width, 1); 7660 7661 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 7662 &CM); 7663 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 7664 << L->getHeader()->getParent()->getName() << "\"\n"); 7665 LVP.executePlan(LB, DT); 7666 7667 // Mark the loop as already vectorized to avoid vectorizing again. 7668 Hints.setAlreadyVectorized(); 7669 7670 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent())); 7671 return true; 7672 } 7673 7674 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 7675 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 7676 !EnableLoopInterleaving), 7677 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 7678 !EnableLoopVectorization) {} 7679 7680 bool LoopVectorizePass::processLoop(Loop *L) { 7681 assert((EnableVPlanNativePath || L->empty()) && 7682 "VPlan-native path is not enabled. Only process inner loops."); 7683 7684 #ifndef NDEBUG 7685 const std::string DebugLocStr = getDebugLocString(L); 7686 #endif /* NDEBUG */ 7687 7688 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 7689 << L->getHeader()->getParent()->getName() << "\" from " 7690 << DebugLocStr << "\n"); 7691 7692 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 7693 7694 LLVM_DEBUG( 7695 dbgs() << "LV: Loop hints:" 7696 << " force=" 7697 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 7698 ? "disabled" 7699 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 7700 ? 
"enabled" 7701 : "?")) 7702 << " width=" << Hints.getWidth() 7703 << " unroll=" << Hints.getInterleave() << "\n"); 7704 7705 // Function containing loop 7706 Function *F = L->getHeader()->getParent(); 7707 7708 // Looking at the diagnostic output is the only way to determine if a loop 7709 // was vectorized (other than looking at the IR or machine code), so it 7710 // is important to generate an optimization remark for each loop. Most of 7711 // these messages are generated as OptimizationRemarkAnalysis. Remarks 7712 // generated as OptimizationRemark and OptimizationRemarkMissed are 7713 // less verbose reporting vectorized loops and unvectorized loops that may 7714 // benefit from vectorization, respectively. 7715 7716 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 7717 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 7718 return false; 7719 } 7720 7721 PredicatedScalarEvolution PSE(*SE, *L); 7722 7723 // Check if it is legal to vectorize the loop. 7724 LoopVectorizationRequirements Requirements(*ORE); 7725 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 7726 &Requirements, &Hints, DB, AC); 7727 if (!LVL.canVectorize(EnableVPlanNativePath)) { 7728 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 7729 Hints.emitRemarkWithHints(); 7730 return false; 7731 } 7732 7733 // Check the function attributes and profiles to find out if this function 7734 // should be optimized for size. 7735 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 7736 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 7737 7738 // Entrance to the VPlan-native vectorization path. Outer loops are processed 7739 // here. They may require CFG and instruction level transformations before 7740 // even evaluating whether vectorization is profitable. Since we cannot modify 7741 // the incoming IR, we need to build VPlan upfront in the vectorization 7742 // pipeline. 7743 if (!L->empty()) 7744 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 7745 ORE, BFI, PSI, Hints); 7746 7747 assert(L->empty() && "Inner loop expected."); 7748 7749 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 7750 // count by optimizing for size, to minimize overheads. 7751 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 7752 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 7753 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " 7754 << "This loop is worth vectorizing only if no scalar " 7755 << "iteration overheads are incurred."); 7756 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 7757 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 7758 else { 7759 LLVM_DEBUG(dbgs() << "\n"); 7760 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 7761 } 7762 } 7763 7764 // Check the function attributes to see if implicit floats are allowed. 7765 // FIXME: This check doesn't seem possibly correct -- what if the loop is 7766 // an integer loop and the vector instructions selected are purely integer 7767 // vector instructions? 7768 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 7769 reportVectorizationFailure( 7770 "Can't vectorize when the NoImplicitFloat attribute is used", 7771 "loop not vectorized due to NoImplicitFloat attribute", 7772 "NoImplicitFloat", ORE, L); 7773 Hints.emitRemarkWithHints(); 7774 return false; 7775 } 7776 7777 // Check if the target supports potentially unsafe FP vectorization. 
7778 // FIXME: Add a check for the type of safety issue (denormal, signaling)
7779 // for the target we're vectorizing for, to make sure none of the
7780 // additional fp-math flags can help.
7781 if (Hints.isPotentiallyUnsafe() &&
7782 TTI->isFPVectorizationPotentiallyUnsafe()) {
7783 reportVectorizationFailure(
7784 "Potentially unsafe FP op prevents vectorization",
7785 "loop not vectorized due to unsafe FP support.",
7786 "UnsafeFP", ORE, L);
7787 Hints.emitRemarkWithHints();
7788 return false;
7789 }
7790
7791 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
7792 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
7793
7794 // If an override option has been passed in for interleaved accesses, use it.
7795 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
7796 UseInterleaved = EnableInterleavedMemAccesses;
7797
7798 // Analyze interleaved memory accesses.
7799 if (UseInterleaved) {
7800 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
7801 }
7802
7803 // Use the cost model.
7804 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
7805 F, &Hints, IAI);
7806 CM.collectValuesToIgnore();
7807
7808 // Use the planner for vectorization.
7809 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE);
7810
7811 // Get user vectorization factor.
7812 unsigned UserVF = Hints.getWidth();
7813
7814 // Plan how to best vectorize, return the best VF and its cost.
7815 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF);
7816
7817 VectorizationFactor VF = VectorizationFactor::Disabled();
7818 unsigned IC = 1;
7819 unsigned UserIC = Hints.getInterleave();
7820
7821 if (MaybeVF) {
7822 VF = *MaybeVF;
7823 // Select the interleave count.
7824 IC = CM.selectInterleaveCount(VF.Width, VF.Cost);
7825 }
7826
7827 // Identify the diagnostic messages that should be produced.
7828 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
7829 bool VectorizeLoop = true, InterleaveLoop = true;
7830 if (Requirements.doesNotMeet(F, L, Hints)) {
7831 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
7832 "requirements.\n");
7833 Hints.emitRemarkWithHints();
7834 return false;
7835 }
7836
7837 if (VF.Width == 1) {
7838 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
7839 VecDiagMsg = std::make_pair(
7840 "VectorizationNotBeneficial",
7841 "the cost-model indicates that vectorization is not beneficial");
7842 VectorizeLoop = false;
7843 }
7844
7845 if (!MaybeVF && UserIC > 1) {
7846 // Tell the user interleaving was avoided up-front, despite being explicitly
7847 // requested.
7848 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
7849 "interleaving should be avoided up front\n");
7850 IntDiagMsg = std::make_pair(
7851 "InterleavingAvoided",
7852 "Ignoring UserIC, because interleaving was avoided up front");
7853 InterleaveLoop = false;
7854 } else if (IC == 1 && UserIC <= 1) {
7855 // Tell the user interleaving is not beneficial.
7856 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
7857 IntDiagMsg = std::make_pair(
7858 "InterleavingNotBeneficial",
7859 "the cost-model indicates that interleaving is not beneficial");
7860 InterleaveLoop = false;
7861 if (UserIC == 1) {
7862 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
7863 IntDiagMsg.second +=
7864 " and is explicitly disabled or interleave count is set to 1";
7865 }
7866 } else if (IC > 1 && UserIC == 1) {
7867 // Tell the user interleaving is beneficial, but it is explicitly disabled.
7868 LLVM_DEBUG(
7869 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
7870 IntDiagMsg = std::make_pair(
7871 "InterleavingBeneficialButDisabled",
7872 "the cost-model indicates that interleaving is beneficial "
7873 "but is explicitly disabled or interleave count is set to 1");
7874 InterleaveLoop = false;
7875 }
7876
7877 // Override IC if user provided an interleave count.
7878 IC = UserIC > 0 ? UserIC : IC;
7879
7880 // Emit diagnostic messages, if any.
7881 const char *VAPassName = Hints.vectorizeAnalysisPassName();
7882 if (!VectorizeLoop && !InterleaveLoop) {
7883 // Do not vectorize or interleave the loop.
7884 ORE->emit([&]() {
7885 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
7886 L->getStartLoc(), L->getHeader())
7887 << VecDiagMsg.second;
7888 });
7889 ORE->emit([&]() {
7890 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
7891 L->getStartLoc(), L->getHeader())
7892 << IntDiagMsg.second;
7893 });
7894 return false;
7895 } else if (!VectorizeLoop && InterleaveLoop) {
7896 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7897 ORE->emit([&]() {
7898 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
7899 L->getStartLoc(), L->getHeader())
7900 << VecDiagMsg.second;
7901 });
7902 } else if (VectorizeLoop && !InterleaveLoop) {
7903 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7904 << ") in " << DebugLocStr << '\n');
7905 ORE->emit([&]() {
7906 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
7907 L->getStartLoc(), L->getHeader())
7908 << IntDiagMsg.second;
7909 });
7910 } else if (VectorizeLoop && InterleaveLoop) {
7911 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
7912 << ") in " << DebugLocStr << '\n');
7913 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
7914 }
7915
7916 LVP.setBestPlan(VF.Width, IC);
7917
7918 using namespace ore;
7919 bool DisableRuntimeUnroll = false;
7920 MDNode *OrigLoopID = L->getLoopID();
7921
7922 if (!VectorizeLoop) {
7923 assert(IC > 1 && "interleave count should not be 1 or 0");
7924 // If we decided that it is not legal to vectorize the loop, then
7925 // interleave it.
7926 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
7927 &CM);
7928 LVP.executePlan(Unroller, DT);
7929
7930 ORE->emit([&]() {
7931 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
7932 L->getHeader())
7933 << "interleaved loop (interleaved count: "
7934 << NV("InterleaveCount", IC) << ")";
7935 });
7936 } else {
7937 // If we decided that it is *legal* to vectorize the loop, then do it.
7938 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
7939 &LVL, &CM);
7940 LVP.executePlan(LB, DT);
7941 ++LoopsVectorized;
7942
7943 // Add metadata to disable runtime unrolling of a scalar loop when there are
7944 // no runtime checks about strides and memory. A scalar loop that is
7945 // rarely used is not worth unrolling.
7946 if (!LB.areSafetyChecksAdded())
7947 DisableRuntimeUnroll = true;
7948
7949 // Report the vectorization decision.
7950 ORE->emit([&]() {
7951 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
7952 L->getHeader())
7953 << "vectorized loop (vectorization width: "
7954 << NV("VectorizationFactor", VF.Width)
7955 << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
7956 });
7957 }
7958
7959 Optional<MDNode *> RemainderLoopID =
7960 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
7961 LLVMLoopVectorizeFollowupEpilogue});
7962 if (RemainderLoopID.hasValue()) {
7963 L->setLoopID(RemainderLoopID.getValue());
7964 } else {
7965 if (DisableRuntimeUnroll)
7966 AddRuntimeUnrollDisableMetaData(L);
7967
7968 // Mark the loop as already vectorized to avoid vectorizing again.
7969 Hints.setAlreadyVectorized();
7970 }
7971
7972 LLVM_DEBUG(verifyFunction(*L->getHeader()->getParent()));
7973 return true;
7974 }
7975
7976 LoopVectorizeResult LoopVectorizePass::runImpl(
7977 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
7978 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
7979 DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
7980 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
7981 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
7982 SE = &SE_;
7983 LI = &LI_;
7984 TTI = &TTI_;
7985 DT = &DT_;
7986 BFI = &BFI_;
7987 TLI = TLI_;
7988 AA = &AA_;
7989 AC = &AC_;
7990 GetLAA = &GetLAA_;
7991 DB = &DB_;
7992 ORE = &ORE_;
7993 PSI = PSI_;
7994
7995 // Don't attempt if
7996 // 1. the target claims to have no vector registers, and
7997 // 2. interleaving won't help ILP.
7998 //
7999 // The second condition is necessary because, even if the target has no
8000 // vector registers, loop vectorization may still enable scalar
8001 // interleaving.
8002 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
8003 TTI->getMaxInterleaveFactor(1) < 2)
8004 return LoopVectorizeResult(false, false);
8005
8006 bool Changed = false, CFGChanged = false;
8007
8008 // The vectorizer requires loops to be in simplified form.
8009 // Since simplification may add new inner loops, it has to run before the
8010 // legality and profitability checks. This means running the loop vectorizer
8011 // will simplify all loops, regardless of whether anything ends up being
8012 // vectorized.
8013 for (auto &L : *LI)
8014 Changed |= CFGChanged |=
8015 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
8016
8017 // Build up a worklist of inner-loops to vectorize. This is necessary as
8018 // the act of vectorizing or partially unrolling a loop creates new loops
8019 // and can invalidate iterators across the loops.
8020 SmallVector<Loop *, 8> Worklist;
8021
8022 for (Loop *L : *LI)
8023 collectSupportedLoops(*L, LI, ORE, Worklist);
8024
8025 LoopsAnalyzed += Worklist.size();
8026
8027 // Now walk the identified inner loops.
8028 while (!Worklist.empty()) {
8029 Loop *L = Worklist.pop_back_val();
8030
8031 // For the inner loops we actually process, form LCSSA to simplify the
8032 // transform.
8033 Changed |= formLCSSARecursively(*L, *DT, LI, SE);
8034
8035 Changed |= CFGChanged |= processLoop(L);
8036 }
8037
8038 // Process each loop nest in the function.
8039 return LoopVectorizeResult(Changed, CFGChanged); 8040 } 8041 8042 PreservedAnalyses LoopVectorizePass::run(Function &F, 8043 FunctionAnalysisManager &AM) { 8044 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 8045 auto &LI = AM.getResult<LoopAnalysis>(F); 8046 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 8047 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 8048 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 8049 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 8050 auto &AA = AM.getResult<AAManager>(F); 8051 auto &AC = AM.getResult<AssumptionAnalysis>(F); 8052 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 8053 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 8054 MemorySSA *MSSA = EnableMSSALoopDependency 8055 ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() 8056 : nullptr; 8057 8058 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 8059 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 8060 [&](Loop &L) -> const LoopAccessInfo & { 8061 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA}; 8062 return LAM.getResult<LoopAccessAnalysis>(L, AR); 8063 }; 8064 const ModuleAnalysisManager &MAM = 8065 AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager(); 8066 ProfileSummaryInfo *PSI = 8067 MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 8068 LoopVectorizeResult Result = 8069 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 8070 if (!Result.MadeAnyChange) 8071 return PreservedAnalyses::all(); 8072 PreservedAnalyses PA; 8073 8074 // We currently do not preserve loopinfo/dominator analyses with outer loop 8075 // vectorization. Until this is addressed, mark these analyses as preserved 8076 // only for non-VPlan-native path. 8077 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 8078 if (!EnableVPlanNativePath) { 8079 PA.preserve<LoopAnalysis>(); 8080 PA.preserve<DominatorTreeAnalysis>(); 8081 } 8082 PA.preserve<BasicAA>(); 8083 PA.preserve<GlobalsAA>(); 8084 if (!Result.MadeCFGChange) 8085 PA.preserveSet<CFGAnalyses>(); 8086 return PA; 8087 } 8088