//===- LoopVectorizationLegality.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides loop vectorization legality analysis. Original code
// resided in LoopVectorize.cpp for a long time.
//
// At this point, it is implemented as a utility class, not as an analysis
// pass. It should be easy to create an analysis pass around it if there
// is a need (but D45420 needs to happen first).
//

#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorize.h"

using namespace llvm;
using namespace PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

namespace llvm {
cl::opt<bool>
    HintsAllowReordering("hints-allow-reordering", cl::init(true), cl::Hidden,
                         cl::desc("Allow enabling loop hints to reorder "
                                  "FP operations during vectorization."));
}

// TODO: Move size-based thresholds out of legality checking, make cost based
// decisions instead of hard thresholds.
static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

static cl::opt<LoopVectorizeHints::ScalableForceKind>
    ForceScalableVectorization(
        "scalable-vectorization", cl::init(LoopVectorizeHints::SK_Unspecified),
        cl::Hidden,
        cl::desc("Control whether the compiler can use scalable vectors to "
                 "vectorize a loop"),
        cl::values(
            clEnumValN(LoopVectorizeHints::SK_FixedWidthOnly, "off",
                       "Scalable vectorization is disabled."),
            clEnumValN(
                LoopVectorizeHints::SK_PreferScalable, "preferred",
                "Scalable vectorization is available and favored when the "
                "cost is inconclusive."),
            clEnumValN(
                LoopVectorizeHints::SK_PreferScalable, "on",
                "Scalable vectorization is available and favored when the "
                "cost is inconclusive.")));
75
76 /// Maximum vectorization interleave count.
77 static const unsigned MaxInterleaveFactor = 16;
78
79 namespace llvm {
80
validate(unsigned Val)81 bool LoopVectorizeHints::Hint::validate(unsigned Val) {
82 switch (Kind) {
83 case HK_WIDTH:
84 return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
85 case HK_INTERLEAVE:
86 return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
87 case HK_FORCE:
88 return (Val <= 1);
89 case HK_ISVECTORIZED:
90 case HK_PREDICATE:
91 case HK_SCALABLE:
92 return (Val == 0 || Val == 1);
93 }
94 return false;
95 }
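// For example, a 'vectorize.width' hint of 4 validates (a power of two no
// greater than VectorizerParams::MaxVectorWidth), whereas a width of 3 fails
// validation and the hint is ignored.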

LoopVectorizeHints::LoopVectorizeHints(const Loop *L,
                                       bool InterleaveOnlyWhenForced,
                                       OptimizationRemarkEmitter &ORE,
                                       const TargetTransformInfo *TTI)
    : Width("vectorize.width", VectorizerParams::VectorizationFactor, HK_WIDTH),
      Interleave("interleave.count", InterleaveOnlyWhenForced, HK_INTERLEAVE),
      Force("vectorize.enable", FK_Undefined, HK_FORCE),
      IsVectorized("isvectorized", 0, HK_ISVECTORIZED),
      Predicate("vectorize.predicate.enable", FK_Undefined, HK_PREDICATE),
      Scalable("vectorize.scalable.enable", SK_Unspecified, HK_SCALABLE),
      TheLoop(L), ORE(ORE) {
  // Populate values with existing loop metadata.
  getHintsFromMetadata();

  // force-vector-interleave overrides DisableInterleaving.
  if (VectorizerParams::isInterleaveForced())
    Interleave.Value = VectorizerParams::VectorizationInterleave;

  // If the metadata doesn't explicitly specify whether to enable scalable
  // vectorization, then decide based on the following criteria (increasing
  // level of priority):
  // - Target default
  // - Metadata width
  // - Force option (always overrides)
  if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified) {
    if (TTI)
      Scalable.Value = TTI->enableScalableVectorization() ? SK_PreferScalable
                                                          : SK_FixedWidthOnly;

    if (Width.Value)
      // If the width is set, but the metadata says nothing about the scalable
      // property, then assume it concerns only a fixed-width UserVF.
      // If width is not set, the flag takes precedence.
      Scalable.Value = SK_FixedWidthOnly;
  }

  // If the flag is set to force any use of scalable vectors, override the loop
  // hints.
  if (ForceScalableVectorization.getValue() !=
      LoopVectorizeHints::SK_Unspecified)
    Scalable.Value = ForceScalableVectorization.getValue();

  // Scalable vectorization is disabled if no preference is specified.
  if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified)
    Scalable.Value = SK_FixedWidthOnly;

  if (IsVectorized.Value != 1)
    // If the vectorization width and interleaving count are both 1 then
    // consider the loop to have been already vectorized because there's
    // nothing more that we can do.
    IsVectorized.Value =
        getWidth() == ElementCount::getFixed(1) && getInterleave() == 1;
  LLVM_DEBUG(if (InterleaveOnlyWhenForced && getInterleave() == 1) dbgs()
             << "LV: Interleaving disabled by the pass manager\n");
}

void LoopVectorizeHints::setAlreadyVectorized() {
  LLVMContext &Context = TheLoop->getHeader()->getContext();

  MDNode *IsVectorizedMD = MDNode::get(
      Context,
      {MDString::get(Context, "llvm.loop.isvectorized"),
       ConstantAsMetadata::get(ConstantInt::get(Context, APInt(32, 1)))});
  MDNode *LoopID = TheLoop->getLoopID();
  MDNode *NewLoopID =
      makePostTransformationMetadata(Context, LoopID,
                                     {Twine(Prefix(), "vectorize.").str(),
                                      Twine(Prefix(), "interleave.").str()},
                                     {IsVectorizedMD});
  TheLoop->setLoopID(NewLoopID);

  // Update internal cache.
  IsVectorized.Value = 1;
}
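// After setAlreadyVectorized(), the loop carries metadata like the following
// (illustrative IR), with any pre-existing 'llvm.loop.vectorize.*' and
// 'llvm.loop.interleave.*' hints dropped by makePostTransformationMetadata:
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.isvectorized", i32 1}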

bool LoopVectorizeHints::allowVectorization(
    Function *F, Loop *L, bool VectorizeOnlyWhenForced) const {
  if (getForce() == LoopVectorizeHints::FK_Disabled) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
    emitRemarkWithHints();
    return false;
  }

  if (VectorizeOnlyWhenForced && getForce() != LoopVectorizeHints::FK_Enabled) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
    emitRemarkWithHints();
    return false;
  }

  if (getIsVectorized() == 1) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
    // FIXME: Add interleave.disable metadata. This will allow
    // vectorize.disable to be used without disabling the pass and errors
    // to differentiate between disabled vectorization and a width of 1.
    ORE.emit([&]() {
      return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                        "AllDisabled", L->getStartLoc(),
                                        L->getHeader())
             << "loop not vectorized: vectorization and interleaving are "
                "explicitly disabled, or the loop has already been "
                "vectorized";
    });
    return false;
  }

  return true;
}

void LoopVectorizeHints::emitRemarkWithHints() const {
  using namespace ore;

  ORE.emit([&]() {
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                      TheLoop->getStartLoc(),
                                      TheLoop->getHeader())
             << "loop not vectorized: vectorization is explicitly disabled";
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", getWidth());
        if (getInterleave() != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", getInterleave());
        R << ")";
      }
      return R;
    }
  });
}

const char *LoopVectorizeHints::vectorizeAnalysisPassName() const {
  if (getWidth() == ElementCount::getFixed(1))
    return LV_NAME;
  if (getForce() == LoopVectorizeHints::FK_Disabled)
    return LV_NAME;
  if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth().isZero())
    return LV_NAME;
  return OptimizationRemarkAnalysis::AlwaysPrint;
}

bool LoopVectorizeHints::allowReordering() const {
  // Allow the vectorizer to change the order of operations if enabling
  // loop hints are provided.
  ElementCount EC = getWidth();
  return HintsAllowReordering &&
         (getForce() == LoopVectorizeHints::FK_Enabled ||
          EC.getKnownMinValue() > 1);
}

void LoopVectorizeHints::getHintsFromMetadata() {
  MDNode *LoopID = TheLoop->getLoopID();
  if (!LoopID)
    return;

  // First operand should refer to the loop id itself.
  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

  for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
    const MDString *S = nullptr;
    SmallVector<Metadata *, 4> Args;

    // The expected hint is either a MDString or a MDNode with the first
    // operand a MDString.
    if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
      if (!MD || MD->getNumOperands() == 0)
        continue;
      S = dyn_cast<MDString>(MD->getOperand(0));
      for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
        Args.push_back(MD->getOperand(i));
    } else {
      S = dyn_cast<MDString>(LoopID->getOperand(i));
      assert(Args.size() == 0 && "too many arguments for MDString");
    }

    if (!S)
      continue;

    // Check if the hint starts with the loop metadata prefix.
    StringRef Name = S->getString();
    if (Args.size() == 1)
      setHint(Name, Args[0]);
  }
}
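// For example (illustrative IR), '#pragma clang loop vectorize_width(4)
// interleave_count(2)' typically yields loop metadata of the form:
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
//   !2 = !{!"llvm.loop.interleave.count", i32 2}
// which setHint() below maps onto the Width and Interleave hints.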

void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) {
  if (!Name.startswith(Prefix()))
    return;
  Name = Name.substr(Prefix().size(), StringRef::npos);

  const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
  if (!C)
    return;
  unsigned Val = C->getZExtValue();

  Hint *Hints[] = {&Width,        &Interleave, &Force,
                   &IsVectorized, &Predicate,  &Scalable};
  for (auto H : Hints) {
    if (Name == H->Name) {
      if (H->validate(Val))
        H->Value = Val;
      else
        LLVM_DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
      break;
    }
  }
}

// Return true if the inner loop \p Lp is uniform with regard to the outer loop
// \p OuterLp (i.e., if the outer loop is vectorized, all the vector lanes
// executing the inner loop will execute the same iterations). This check is
// very constrained for now but it will be relaxed in the future. \p Lp is
// considered uniform if it meets all the following conditions:
//   1) it has a canonical IV (starting from 0 and with stride 1),
//   2) its latch terminator is a conditional branch, and
//   3) its latch condition is a compare instruction whose operands are the
//      canonical IV and an OuterLp invariant.
// This check doesn't take into account the uniformity of other conditions not
// related to the loop latch because they don't affect the loop uniformity.
//
// NOTE: We decided to keep all these checks and their associated documentation
// together so that we can easily have a picture of the currently supported
// loop nests. However, some of the current checks don't depend on \p OuterLp
// and would be redundantly executed for each \p Lp if we invoked this function
// for different candidate outer loops. This is not the case for now because we
// don't currently have the infrastructure to evaluate multiple candidate outer
// loops and \p OuterLp will be a fixed parameter while we only support explicit
// outer loop vectorization. It's also very likely that these checks go away
// before introducing the aforementioned infrastructure. However, if this is not
// the case, we should move the \p OuterLp independent checks to a separate
// function that is only executed once for each \p Lp.
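// As an illustration (hypothetical C source), the inner loop below is
// uniform w.r.t. the outer loop: 'j' is a canonical IV starting at 0 with
// stride 1, the latch branch is conditional, and the latch compare is
// between the IV update and 'M', which is invariant in the outer loop:
//   for (int i = 0; i < N; ++i)   // OuterLp
//     for (int j = 0; j < M; ++j) // Lp
//       A[i][j] += B[j];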
static bool isUniformLoop(Loop *Lp, Loop *OuterLp) {
  assert(Lp->getLoopLatch() && "Expected loop with a single latch.");

  // If Lp is the outer loop, it's uniform by definition.
  if (Lp == OuterLp)
    return true;
  assert(OuterLp->contains(Lp) && "OuterLp must contain Lp.");

  // 1.
  PHINode *IV = Lp->getCanonicalInductionVariable();
  if (!IV) {
    LLVM_DEBUG(dbgs() << "LV: Canonical IV not found.\n");
    return false;
  }

  // 2.
  BasicBlock *Latch = Lp->getLoopLatch();
  auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr || LatchBr->isUnconditional()) {
    LLVM_DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n");
    return false;
  }

  // 3.
  auto *LatchCmp = dyn_cast<CmpInst>(LatchBr->getCondition());
  if (!LatchCmp) {
    LLVM_DEBUG(
        dbgs() << "LV: Loop latch condition is not a compare instruction.\n");
    return false;
  }

  Value *CondOp0 = LatchCmp->getOperand(0);
  Value *CondOp1 = LatchCmp->getOperand(1);
  Value *IVUpdate = IV->getIncomingValueForBlock(Latch);
  if (!(CondOp0 == IVUpdate && OuterLp->isLoopInvariant(CondOp1)) &&
      !(CondOp1 == IVUpdate && OuterLp->isLoopInvariant(CondOp0))) {
    LLVM_DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n");
    return false;
  }

  return true;
}

// Return true if \p Lp and all its nested loops are uniform with regard to \p
// OuterLp.
static bool isUniformLoopNest(Loop *Lp, Loop *OuterLp) {
  if (!isUniformLoop(Lp, OuterLp))
    return false;

  // Check if nested loops are uniform.
  for (Loop *SubLp : *Lp)
    if (!isUniformLoopNest(SubLp, OuterLp))
      return false;

  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by widening the type.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reductions, inductions and non-header phis are allowed to have exit
  // users. All other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the instruction's users are inside the loop.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        LLVM_DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}
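// For example (illustrative IR), %sum.next below has an outside user and
// makes hasOutsideLoopUser() return true unless it is in AllowedExit:
//   loop:
//     %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
//     %sum.next = add i32 %sum, 1
//     br i1 %cond, label %loop, label %exit
//   exit:
//     %use = add i32 %sum.next, 1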

/// Returns true if A and B have the same pointer operand or the same SCEV
/// address.
static bool storeToSameAddress(ScalarEvolution *SE, StoreInst *A,
                               StoreInst *B) {
  // Compare store instructions.
  if (A == B)
    return true;

  // Otherwise compare pointer operands.
  Value *APtr = A->getPointerOperand();
  Value *BPtr = B->getPointerOperand();
  if (APtr == BPtr)
    return true;

  // Otherwise compare address SCEVs.
  if (SE->getSCEV(APtr) == SE->getSCEV(BPtr))
    return true;

  return false;
}

int LoopVectorizationLegality::isConsecutivePtr(Type *AccessTy,
                                                Value *Ptr) const {
  const ValueToValueMap &Strides =
      getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();

  Function *F = TheLoop->getHeader()->getParent();
  bool OptForSize = F->hasOptSize() ||
                    llvm::shouldOptimizeForSize(TheLoop->getHeader(), PSI, BFI,
                                                PGSOQueryType::IRPass);
  bool CanAddPredicate = !OptForSize;
  int Stride = getPtrStride(PSE, AccessTy, Ptr, TheLoop, Strides,
                            CanAddPredicate, false);
  if (Stride == 1 || Stride == -1)
    return Stride;
  return 0;
}
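// For example, in a loop over 'i' with unit step, the address of 'A[i]' has
// stride 1 and the address of 'A[n - i]' has stride -1; any other stride
// (e.g. the address of 'A[2 * i]') yields 0 here.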

bool LoopVectorizationLegality::isUniform(Value *V) {
  return LAI->isUniform(V);
}

bool LoopVectorizationLegality::canVectorizeOuterLoop() {
  assert(!TheLoop->isInnermost() && "We are not vectorizing an outer loop.");
  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;
  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);

  for (BasicBlock *BB : TheLoop->blocks()) {
    // Check whether the BB terminator is a BranchInst. Any other terminator is
    // not supported yet.
    auto *Br = dyn_cast<BranchInst>(BB->getTerminator());
    if (!Br) {
      reportVectorizationFailure("Unsupported basic block terminator",
          "loop control flow is not understood by vectorizer",
          "CFGNotUnderstood", ORE, TheLoop);
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }

    // Check whether the BranchInst is a supported one. Only unconditional
    // branches, conditional branches with an outer loop invariant condition or
    // backedges are supported.
    // FIXME: We skip these checks when VPlan predication is enabled as we
    // want to allow divergent branches. This whole check will be removed
    // once VPlan predication is on by default.
    if (Br && Br->isConditional() &&
        !TheLoop->isLoopInvariant(Br->getCondition()) &&
        !LI->isLoopHeader(Br->getSuccessor(0)) &&
        !LI->isLoopHeader(Br->getSuccessor(1))) {
      reportVectorizationFailure("Unsupported conditional branch",
          "loop control flow is not understood by vectorizer",
          "CFGNotUnderstood", ORE, TheLoop);
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }
  }

  // Check whether inner loops are uniform. At this point, we only support
  // simple outer loop scenarios with uniform nested loops.
  if (!isUniformLoopNest(TheLoop /*loop nest*/,
                         TheLoop /*context outer loop*/)) {
    reportVectorizationFailure("Outer loop contains divergent loops",
        "loop control flow is not understood by vectorizer",
        "CFGNotUnderstood", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Check whether we are able to set up outer loop induction.
  if (!setupOuterLoopInductions()) {
    reportVectorizationFailure("Unsupported outer loop Phi(s)",
                               "Unsupported outer loop Phi(s)",
                               "UnsupportedPhi", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  return Result;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;

  // In case this induction also comes with casts that we know we can ignore
  // in the vectorized loop body, record them here. All casts could be
  // recorded here for ignoring, but it suffices to record only the first (as
  // it is the only one that may be used outside the cast sequence).
  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (!Casts.empty())
    InductionCastsToIgnore.insert(*Casts.begin());

  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!PrimaryInduction || PhiTy == WidestIndTy)
      PrimaryInduction = Phi;
  }

  // Both the PHI node itself, and the "post-increment" value feeding
  // back into the PHI node may have external users.
  // We can allow those uses, except if the SCEVs we have for them rely
  // on predicates that only hold within the loop, since allowing the exit
  // currently means re-using this SCEV outside the loop (see PR33706 for more
  // details).
  if (PSE.getPredicate().isAlwaysTrue()) {
    AllowedExit.insert(Phi);
    AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
  }

  LLVM_DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::setupOuterLoopInductions() {
  BasicBlock *Header = TheLoop->getHeader();

  // Returns true if a given Phi is a supported induction.
  auto isSupportedPhi = [&](PHINode &Phi) -> bool {
    InductionDescriptor ID;
    if (InductionDescriptor::isInductionPHI(&Phi, TheLoop, PSE, ID) &&
        ID.getKind() == InductionDescriptor::IK_IntInduction) {
      addInductionPhi(&Phi, ID, AllowedExit);
      return true;
    }
    // Bail out for any Phi in the outer loop header that is not a supported
    // induction.
    LLVM_DEBUG(
        dbgs() << "LV: Found unsupported PHI for outer loop vectorization.\n");
    return false;
  };

  return llvm::all_of(Header->phis(), isSupportedPhi);
}

/// Checks if a function is scalarizable according to the TLI, in
/// the sense that it should be vectorized and then expanded in
/// multiple scalar calls. This is represented in the
/// TLI via mappings that do not specify a vector name, as in the
/// following example:
///
///    const VecDesc VecIntrinsics[] = {
///      {"llvm.phx.abs.i32", "", 4}
///    };
static bool isTLIScalarize(const TargetLibraryInfo &TLI, const CallInst &CI) {
  const StringRef ScalarName = CI.getCalledFunction()->getName();
  bool Scalarize = TLI.isFunctionVectorizable(ScalarName);
  // Check that all known VFs are not associated to a vector
  // function, i.e. the vector name is empty.
  if (Scalarize) {
    ElementCount WidestFixedVF, WidestScalableVF;
    TLI.getWidestVF(ScalarName, WidestFixedVF, WidestScalableVF);
    for (ElementCount VF = ElementCount::getFixed(2);
         ElementCount::isKnownLE(VF, WidestFixedVF); VF *= 2)
      Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
    for (ElementCount VF = ElementCount::getScalable(1);
         ElementCount::isKnownLE(VF, WidestScalableVF); VF *= 2)
      Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
    assert((WidestScalableVF.isZero() || !Scalarize) &&
           "Caller may decide to scalarize a variant using a scalable VF");
  }
  return Scalarize;
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          reportVectorizationFailure("Found a non-int non-pointer PHI",
              "loop control flow is not understood by vectorizer",
              "CFGNotUnderstood", ORE, TheLoop);
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to select during if-conversion. No need to check if
        // the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Non-header phi nodes that have outside uses can be vectorized. Add
          // them to the list of allowed exits.
          // Unsafe cyclic dependencies with header phis are identified during
          // legalization for reduction, induction and first order
          // recurrences.
          AllowedExit.insert(&I);
          continue;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          reportVectorizationFailure("Found an invalid PHI",
              "loop control flow is not understood by vectorizer",
              "CFGNotUnderstood", ORE, TheLoop, Phi);
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes, DB, AC,
                                                 DT, PSE.getSE())) {
          Requirements->addExactFPMathInst(RedDes.getExactFPMathInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        // TODO: Instead of recording the AllowedExit, it would be good to
        // record the complementary set: NotAllowedExit. These include (but may
        // not be limited to):
        // 1. Reduction phis as they represent the one-before-last value, which
        //    is not available when vectorized
        // 2. Induction phis and increment when SCEV predicates cannot be used
        //    outside the loop - see addInductionPhi
        // 3. Non-Phis with outside uses when SCEV predicates cannot be used
        //    outside the loop - see call to hasOutsideLoopUser in the non-phi
        //    handling below
        // 4. FirstOrderRecurrence phis that can possibly be handled by
        //    extraction.
        // By recording these, we can then reason about ways to vectorize each
        // of these NotAllowedExit.
        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          Requirements->addExactFPMathInst(ID.getExactFPMathInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
                                                         SinkAfter, DT)) {
          AllowedExit.insert(Phi);
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        reportVectorizationFailure("Found an unidentified PHI",
            "value that could not be identified as "
            "reduction is used outside the loop",
            "NonReductionValueUsedOutsideLoop", ORE, TheLoop, Phi);
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);

      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            (!VFDatabase::getMappings(*CI).empty() ||
             isTLIScalarize(*TLI, *CI)))) {
        // If the call is a recognized math library call, it is likely that
        // we can vectorize it given loosened floating-point constraints.
        LibFunc Func;
        bool IsMathLibCall =
            TLI && CI->getCalledFunction() &&
            CI->getType()->isFloatingPointTy() &&
            TLI->getLibFunc(CI->getCalledFunction()->getName(), Func) &&
            TLI->hasOptimizedCodeGen(Func);

        if (IsMathLibCall) {
          // TODO: Ideally, we should not use clang-specific language here,
          // but it's hard to provide meaningful yet generic advice.
          // Also, should this be guarded by allowExtraAnalysis() and/or be part
          // of the returned info from isFunctionVectorizable()?
          reportVectorizationFailure(
              "Found a non-intrinsic callsite",
              "library call cannot be vectorized. "
              "Try compiling with -fno-math-errno, -ffast-math, "
              "or similar flags",
              "CantVectorizeLibcall", ORE, TheLoop, CI);
        } else {
          reportVectorizationFailure("Found a non-intrinsic callsite",
                                     "call instruction cannot be vectorized",
                                     "CantVectorizeLibcall", ORE, TheLoop, CI);
        }
        return false;
      }

      // Some intrinsics have scalar arguments, which must stay the same across
      // all lanes (i.e. be loop invariant) for the call to be vectorized.
      if (CI) {
        auto *SE = PSE.getSE();
        Intrinsic::ID IntrinID = getVectorIntrinsicIDForCall(CI, TLI);
        for (unsigned i = 0, e = CI->arg_size(); i != e; ++i)
          if (isVectorIntrinsicWithScalarOpAtArg(IntrinID, i)) {
            if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(i)), TheLoop)) {
              reportVectorizationFailure("Found unvectorizable intrinsic",
                  "intrinsic instruction cannot be vectorized",
                  "CantVectorizeIntrinsic", ORE, TheLoop, CI);
              return false;
            }
          }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          isa<ExtractElementInst>(I)) {
        reportVectorizationFailure("Found unvectorizable type",
            "instruction return type cannot be vectorized",
            "CantVectorizeInstructionReturnType", ORE, TheLoop, &I);
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          reportVectorizationFailure("Store instruction cannot be vectorized",
                                     "store instruction cannot be vectorized",
                                     "CantVectorizeStore", ORE, TheLoop, ST);
          return false;
        }

        // For nontemporal stores, check that a nontemporal vector version is
        // supported on the target.
        if (ST->getMetadata(LLVMContext::MD_nontemporal)) {
          // Arbitrarily try a vector of 2 elements.
          auto *VecTy = FixedVectorType::get(T, /*NumElts=*/2);
          assert(VecTy && "did not find vectorized version of stored type");
          if (!TTI->isLegalNTStore(VecTy, ST->getAlign())) {
            reportVectorizationFailure(
                "nontemporal store instruction cannot be vectorized",
                "nontemporal store instruction cannot be vectorized",
                "CantVectorizeNontemporalStore", ORE, TheLoop, ST);
            return false;
          }
        }

      } else if (auto *LD = dyn_cast<LoadInst>(&I)) {
        if (LD->getMetadata(LLVMContext::MD_nontemporal)) {
          // For nontemporal loads, check that a nontemporal vector version is
          // supported on the target (arbitrarily try a vector of 2 elements).
          auto *VecTy = FixedVectorType::get(I.getType(), /*NumElts=*/2);
          assert(VecTy && "did not find vectorized version of load type");
          if (!TTI->isLegalNTLoad(VecTy, LD->getAlign())) {
            reportVectorizationFailure(
                "nontemporal load instruction cannot be vectorized",
                "nontemporal load instruction cannot be vectorized",
                "CantVectorizeNontemporalLoad", ORE, TheLoop, LD);
            return false;
          }
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.isFast()) {
        LLVM_DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        // We can safely vectorize loops where instructions within the loop are
        // used outside the loop only if the SCEV predicates within the loop
        // are the same as outside the loop. Allowing the exit means reusing
        // the SCEV outside the loop.
        if (PSE.getPredicate().isAlwaysTrue()) {
          AllowedExit.insert(&I);
          continue;
        }
        reportVectorizationFailure("Value cannot be used outside the loop",
                                   "value cannot be used outside the loop",
                                   "ValueUsedOutsideLoop", ORE, TheLoop, &I);
        return false;
      }
    } // next instr.
  }

  if (!PrimaryInduction) {
    if (Inductions.empty()) {
      reportVectorizationFailure("Did not find one integer induction var",
          "loop induction variable could not be identified",
          "NoInductionVariable", ORE, TheLoop);
      return false;
    }
    if (!WidestIndTy) {
      reportVectorizationFailure("Did not find one integer induction var",
          "integer loop induction variable could not be identified",
          "NoIntegerInductionVariable", ORE, TheLoop);
      return false;
    }
    LLVM_DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
  }

  // For first order recurrences, we use the previous value (incoming value
  // from the latch) to check if it dominates all users of the recurrence.
  // Bail out if we have to sink such an instruction for another recurrence,
  // as the dominance requirement may not hold after sinking.
  BasicBlock *LoopLatch = TheLoop->getLoopLatch();
  if (any_of(FirstOrderRecurrences, [LoopLatch, this](const PHINode *Phi) {
        Instruction *V =
            cast<Instruction>(Phi->getIncomingValueForBlock(LoopLatch));
        return SinkAfter.find(V) != SinkAfter.end();
      }))
    return false;

  // Now we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
    PrimaryInduction = nullptr;

  return true;
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &(*GetLAA)(*TheLoop);
  const OptimizationRemarkAnalysis *LAR = LAI->getReport();
  if (LAR) {
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
                                        "loop not vectorized: ", *LAR);
    });
  }

  if (!LAI->canVectorizeMemory())
    return false;

  // We can vectorize stores to an invariant address when the final reduction
  // value is guaranteed to be stored at the end of the loop. Also, if the
  // decision to vectorize the loop is made, runtime checks are added to make
  // sure that the invariant address won't alias with any other objects.
  if (!LAI->getStoresToInvariantAddresses().empty()) {
    // For each invariant address, check that its last store is unconditional.
    for (StoreInst *SI : LAI->getStoresToInvariantAddresses()) {
      if (isInvariantStoreOfReduction(SI) &&
          blockNeedsPredication(SI->getParent())) {
        reportVectorizationFailure(
            "We don't allow storing to uniform addresses",
            "write of conditional recurring variant value to a loop "
            "invariant address could not be vectorized",
            "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
        return false;
      }
    }

    if (LAI->hasDependenceInvolvingLoopInvariantAddress()) {
      // For each invariant address, check that its last stored value is the
      // result of one of our reductions.
      //
      // We do not check if dependence with loads exists because they are
      // currently rejected earlier in LoopAccessInfo::analyzeLoop. In case
      // this behaviour changes we have to modify this code.
      ScalarEvolution *SE = PSE.getSE();
      SmallVector<StoreInst *, 4> UnhandledStores;
      for (StoreInst *SI : LAI->getStoresToInvariantAddresses()) {
        if (isInvariantStoreOfReduction(SI)) {
          // Earlier stores to this address are effectively dead code.
          // With opaque pointers it is possible for one pointer to be used
          // with different sizes of stored values:
          //    store i32 0, ptr %x
          //    store i8 0, ptr %x
          // The latter store doesn't completely overwrite the first one in
          // the example. That is why we have to make sure that the types of
          // the stored values are the same.
          // TODO: Check that the bitwidth of an unhandled store is smaller
          // than the one that overwrites it, and add a test.
          erase_if(UnhandledStores, [SE, SI](StoreInst *I) {
            return storeToSameAddress(SE, SI, I) &&
                   I->getValueOperand()->getType() ==
                       SI->getValueOperand()->getType();
          });
          continue;
        }
        UnhandledStores.push_back(SI);
      }

      bool IsOK = UnhandledStores.empty();
      // TODO: we should also validate against InvariantMemSets.
      if (!IsOK) {
        reportVectorizationFailure(
            "We don't allow storing to uniform addresses",
            "write to a loop invariant address could not "
            "be vectorized",
            "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
        return false;
      }
    }
  }

  PSE.addPredicate(LAI->getPSE().getPredicate());
  return true;
}

bool LoopVectorizationLegality::canVectorizeFPMath(
    bool EnableStrictReductions) {

  // First check if there is any ExactFP math or if we allow reassociations.
  if (!Requirements->getExactFPInst() || Hints->allowReordering())
    return true;

  // If the above is false, we have ExactFPMath & do not allow reordering.
  // If the EnableStrictReductions flag is set, first check if we have any
  // Exact FP induction vars, which we cannot vectorize.
  if (!EnableStrictReductions ||
      any_of(getInductionVars(), [&](auto &Induction) -> bool {
        InductionDescriptor IndDesc = Induction.second;
        return IndDesc.getExactFPMathInst();
      }))
    return false;

  // We can now only vectorize if all reductions with Exact FP math also
  // have the isOrdered flag set, which indicates that we can move the
  // reduction operations in-loop.
  return (all_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    return !RdxDesc.hasExactFPMath() || RdxDesc.isOrdered();
  }));
}

bool LoopVectorizationLegality::isInvariantStoreOfReduction(StoreInst *SI) {
  return any_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    return RdxDesc.IntermediateStore == SI;
  });
}

bool LoopVectorizationLegality::isInvariantAddressOfReduction(Value *V) {
  return any_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    if (!RdxDesc.IntermediateStore)
      return false;

    ScalarEvolution *SE = PSE.getSE();
    Value *InvariantAddress = RdxDesc.IntermediateStore->getPointerOperand();
    return V == InvariantAddress ||
           SE->getSCEV(V) == SE->getSCEV(InvariantAddress);
  });
}

bool LoopVectorizationLegality::isInductionPhi(const Value *V) const {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

const InductionDescriptor *
LoopVectorizationLegality::getIntOrFpInductionDescriptor(PHINode *Phi) const {
  if (!isInductionPhi(Phi))
    return nullptr;
  auto &ID = getInductionVars().find(Phi)->second;
  if (ID.getKind() == InductionDescriptor::IK_IntInduction ||
      ID.getKind() == InductionDescriptor::IK_FpInduction)
    return &ID;
  return nullptr;
}

const InductionDescriptor *
LoopVectorizationLegality::getPointerInductionDescriptor(PHINode *Phi) const {
  if (!isInductionPhi(Phi))
    return nullptr;
  auto &ID = getInductionVars().find(Phi)->second;
  if (ID.getKind() == InductionDescriptor::IK_PtrInduction)
    return &ID;
  return nullptr;
}

bool LoopVectorizationLegality::isCastedInductionVariable(
    const Value *V) const {
  auto *Inst = dyn_cast<Instruction>(V);
  return (Inst && InductionCastsToIgnore.count(Inst));
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) const {
  return isInductionPhi(V) || isCastedInductionVariable(V);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(
    const PHINode *Phi) const {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) const {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
    SmallPtrSetImpl<const Instruction *> &MaskedOp,
    SmallPtrSetImpl<Instruction *> &ConditionalAssumes) const {
  for (Instruction &I : *BB) {
    // We can predicate blocks with calls to assume, as long as we drop them in
    // case we flatten the CFG via predication.
    if (match(&I, m_Intrinsic<Intrinsic::assume>())) {
      ConditionalAssumes.insert(&I);
      continue;
    }

    // Do not let llvm.experimental.noalias.scope.decl block the vectorization.
    // TODO: there might be cases that it should block the vectorization. Let's
    // ignore those for now.
    if (isa<NoAliasScopeDeclInst>(&I))
      continue;

    // We might be able to hoist the load.
    if (I.mayReadFromMemory()) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        MaskedOp.insert(LI);
        continue;
      }
    }

    if (I.mayWriteToMemory()) {
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!SI)
        return false;
      // A predicated store requires some form of masking:
      //   1) a masked store HW instruction,
      //   2) emulation via load-blend-store (only if safe and legal to do so,
      //      and be aware of the race conditions; see the illustrative
      //      sequence below), or
      //   3) an element-by-element predicate check and scalar store.
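      // For reference, option (2) corresponds to a sequence like the
      // following (illustrative IR for a <4 x i32> store under mask %m):
      //   %old   = load <4 x i32>, ptr %p
      //   %blend = select <4 x i1> %m, <4 x i32> %val, <4 x i32> %old
      //   store <4 x i32> %blend, ptr %p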
      MaskedOp.insert(SI);
      continue;
    }
    if (I.mayThrow())
      return false;
  }

  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    reportVectorizationFailure("If-conversion is disabled",
                               "if-conversion is disabled",
                               "IfConversionDisabled",
                               ORE, TheLoop);
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers which are known to be dereferenceable within the scope
  // of the loop body for each iteration of the loop which executes. That is,
  // the memory pointed to can be dereferenced (with the access size implied by
  // the value's type) unconditionally within the loop header without
  // introducing a new fault.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredication(BB)) {
      for (Instruction &I : *BB)
        if (auto *Ptr = getLoadStorePointerOperand(&I))
          SafePointers.insert(Ptr);
      continue;
    }

    // For a block which requires predication, an address may be safe to access
    // in the loop w/o predication if we can prove dereferenceability facts
    // sufficient to ensure it'll never fault within the loop. For the moment,
    // we restrict this to loads; stores are more complicated due to
    // concurrency restrictions.
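    // For example (hypothetical C source), 'a[i]' below is loaded only under
    // a condition, but if 'a' is known dereferenceable and suitably aligned
    // across the loop's full iteration space, the load may be treated as safe
    // to speculate:
    //   for (int i = 0; i < n; ++i)
    //     if (cond[i])
    //       sum += a[i];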
    ScalarEvolution &SE = *PSE.getSE();
    for (Instruction &I : *BB) {
      LoadInst *LI = dyn_cast<LoadInst>(&I);
      if (LI && !LI->getType()->isVectorTy() && !mustSuppressSpeculation(*LI) &&
          isDereferenceableAndAlignedInLoop(LI, TheLoop, SE, *DT))
        SafePointers.insert(LI->getPointerOperand());
    }
  }

  // Collect the blocks that need predication.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      reportVectorizationFailure("Loop contains a switch statement",
                                 "loop contains a switch statement",
                                 "LoopContainsSwitch", ORE, TheLoop,
                                 BB->getTerminator());
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers, MaskedOp,
                                ConditionalAssumes)) {
        reportVectorizationFailure(
            "Control flow cannot be substituted for a select",
            "control flow cannot be substituted for a select",
            "NoCFGForSelect", ORE, TheLoop,
            BB->getTerminator());
        return false;
      }
    }
  }

  // We can if-convert this loop.
  return true;
}

// Helper function to canVectorizeLoopNestCFG.
bool LoopVectorizationLegality::canVectorizeLoopCFG(Loop *Lp,
                                                    bool UseVPlanNativePath) {
  assert((UseVPlanNativePath || Lp->isInnermost()) &&
         "VPlan-native path is not enabled.");

  // TODO: ORE should be improved to show more accurate information when an
  // outer loop can't be vectorized because a nested loop is not understood or
  // legal. Something like: "outer_loop_location: loop not vectorized:
  // (inner_loop_location) loop control flow is not understood by vectorizer".

  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;
  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);

  // We must have a loop in canonical form. Loops with indirectbr in them
  // cannot be canonicalized.
  if (!Lp->getLoopPreheader()) {
    reportVectorizationFailure("Loop doesn't have a legal pre-header",
        "loop control flow is not understood by vectorizer",
        "CFGNotUnderstood", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We must have a single backedge.
  if (Lp->getNumBackEdges() != 1) {
    reportVectorizationFailure("The loop must have a single backedge",
        "loop control flow is not understood by vectorizer",
        "CFGNotUnderstood", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  return Result;
}

bool LoopVectorizationLegality::canVectorizeLoopNestCFG(
    Loop *Lp, bool UseVPlanNativePath) {
  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;
  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  if (!canVectorizeLoopCFG(Lp, UseVPlanNativePath)) {
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Recursively check whether the loop control flow of nested loops is
  // understood.
  for (Loop *SubLp : *Lp)
    if (!canVectorizeLoopNestCFG(SubLp, UseVPlanNativePath)) {
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }

  return Result;
}

bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;

  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
  // Check whether the loop-related control flow in the loop nest is expected
  // by the vectorizer.
  if (!canVectorizeLoopNestCFG(TheLoop, UseVPlanNativePath)) {
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
                    << '\n');

  // Specific checks for outer loops. We skip the remaining legal checks at
  // this point because they don't support outer loops.
  if (!TheLoop->isInnermost()) {
    assert(UseVPlanNativePath && "VPlan-native path is not enabled.");

    if (!canVectorizeOuterLoop()) {
      reportVectorizationFailure("Unsupported outer loop",
                                 "unsupported outer loop",
                                 "UnsupportedOuterLoop",
                                 ORE, TheLoop);
      // TODO: Implement DoExtraAnalysis when subsequent legal checks support
      // outer loops.
      return false;
    }

    LLVM_DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n");
    return Result;
  }

  assert(TheLoop->isInnermost() && "Inner loop expected.");
  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    LLVM_DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    LLVM_DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  LLVM_DEBUG(dbgs() << "LV: We can vectorize this loop"
                    << (LAI->getRuntimePointerChecking()->Need
                            ? " (with a runtime bound check)"
                            : "")
                    << "!\n");

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getPredicate().getComplexity() > SCEVThreshold) {
    reportVectorizationFailure("Too many SCEV checks needed",
        "Too many SCEV assumptions need to be made and checked at runtime",
        "TooManySCEVRunTimeChecks", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Okay! We've done all the tests. If any have failed, return false.
  // Otherwise we can vectorize, and at this point we don't have any other mem
  // analysis which may limit our maximum vectorization factor, so just return
  // true with no restrictions.
  return Result;
}

bool LoopVectorizationLegality::prepareToFoldTailByMasking() {

  LLVM_DEBUG(dbgs() << "LV: checking if tail can be folded by masking.\n");

  SmallPtrSet<const Value *, 8> ReductionLiveOuts;

  for (auto &Reduction : getReductionVars())
    ReductionLiveOuts.insert(Reduction.second.getLoopExitInstr());

  // TODO: handle non-reduction outside users when tail is folded by masking.
  for (auto *AE : AllowedExit) {
    // Check that all users of allowed exit values are inside the loop or
    // are the live-out of a reduction.
    if (ReductionLiveOuts.count(AE))
      continue;
    for (User *U : AE->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (TheLoop->contains(UI))
        continue;
      LLVM_DEBUG(
          dbgs()
          << "LV: Cannot fold tail by masking, loop has an outside user for "
          << *UI << "\n");
      return false;
    }
  }

  // The list of pointers that we can safely read and write to remains empty.
  SmallPtrSet<Value *, 8> SafePointers;

  SmallPtrSet<const Instruction *, 8> TmpMaskedOp;
  SmallPtrSet<Instruction *, 8> TmpConditionalAssumes;

  // Check and mark all blocks for predication, including those that ordinarily
  // do not need predication such as the header block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockCanBePredicated(BB, SafePointers, TmpMaskedOp,
                              TmpConditionalAssumes)) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking as requested.\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "LV: can fold tail by masking.\n");

  MaskedOp.insert(TmpMaskedOp.begin(), TmpMaskedOp.end());
  ConditionalAssumes.insert(TmpConditionalAssumes.begin(),
                            TmpConditionalAssumes.end());

  return true;
}

} // namespace llvm