//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Distribution Pass. Its main focus is to
// distribute loops that cannot be vectorized due to dependence cycles. It
// tries to isolate the offending dependences into a new loop allowing
// vectorization of the remaining parts.
//
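// For example (an illustrative sketch, not from the original comment), a loop
// such as
//
//   for (i = 0; i < n; i++) {
//     A[i + 1] = A[i] * B[i];  // dependence cycle, not vectorizable
//     C[i] = D[i] * E[i];      // independent, vectorizable
//   }
//
// can be distributed into one loop carrying the dependence cycle and a second,
// vectorizable loop for the remaining statement.
//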
// For dependence analysis, the pass uses the LoopVectorizer's
// LoopAccessAnalysis. Because this analysis presumes no change in the order of
// memory operations, special care is taken to preserve the lexical order of
// these operations.
//
// Similarly to the Vectorizer, the pass also supports loop versioning to
// disambiguate potentially overlapping arrays at run time.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopDistribute.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <functional>
#include <list>
#include <tuple>
#include <utility>

using namespace llvm;

#define LDIST_NAME "loop-distribute"
#define DEBUG_TYPE LDIST_NAME

/// @{
/// Metadata attribute names
static const char *const LLVMLoopDistributeFollowupAll =
    "llvm.loop.distribute.followup_all";
static const char *const LLVMLoopDistributeFollowupCoincident =
    "llvm.loop.distribute.followup_coincident";
static const char *const LLVMLoopDistributeFollowupSequential =
    "llvm.loop.distribute.followup_sequential";
static const char *const LLVMLoopDistributeFollowupFallback =
    "llvm.loop.distribute.followup_fallback";
/// @}

static cl::opt<bool>
    LDistVerify("loop-distribute-verify", cl::Hidden,
                cl::desc("Turn on DominatorTree and LoopInfo verification "
                         "after Loop Distribution"),
                cl::init(false));

static cl::opt<bool> DistributeNonIfConvertible(
    "loop-distribute-non-if-convertible", cl::Hidden,
    cl::desc("Whether to distribute into a loop that may not be "
             "if-convertible by the loop vectorizer"),
    cl::init(false));

static cl::opt<unsigned> DistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Distribution"));
static cl::opt<unsigned> PragmaDistributeSCEVCheckThreshold(
    "loop-distribute-scev-check-threshold-with-pragma", cl::init(128),
    cl::Hidden,
    cl::desc(
        "The maximum number of SCEV checks allowed for Loop "
        "Distribution for loops marked with #pragma loop distribute(enable)"));

static cl::opt<bool> EnableLoopDistribute(
    "enable-loop-distribute", cl::Hidden,
    cl::desc("Enable the new, experimental LoopDistribution Pass"),
    cl::init(false));

STATISTIC(NumLoopsDistributed, "Number of loops distributed");

namespace {

/// Maintains the set of instructions of the loop for a partition before
/// cloning. After cloning, it hosts the new loop.
class InstPartition {
  using InstructionSet = SmallPtrSet<Instruction *, 8>;

public:
  InstPartition(Instruction *I, Loop *L, bool DepCycle = false)
      : DepCycle(DepCycle), OrigLoop(L) {
    Set.insert(I);
  }

  /// Returns whether this partition contains a dependence cycle.
  bool hasDepCycle() const { return DepCycle; }

  /// Adds an instruction to this partition.
  void add(Instruction *I) { Set.insert(I); }

  /// Collection accessors.
  InstructionSet::iterator begin() { return Set.begin(); }
  InstructionSet::iterator end() { return Set.end(); }
  InstructionSet::const_iterator begin() const { return Set.begin(); }
  InstructionSet::const_iterator end() const { return Set.end(); }
  bool empty() const { return Set.empty(); }

  /// Moves this partition into \p Other. This partition becomes empty
  /// after this.
  void moveTo(InstPartition &Other) {
    Other.Set.insert(Set.begin(), Set.end());
    Set.clear();
    Other.DepCycle |= DepCycle;
  }

  /// Populates the partition with a transitive closure of all the
  /// instructions that the seeded instructions depend on.
  void populateUsedSet() {
    // FIXME: We currently don't use control-dependence but simply include all
    // blocks (possibly empty at the end) and let simplifycfg mostly clean this
    // up.
    for (auto *B : OrigLoop->getBlocks())
      Set.insert(B->getTerminator());

    // Follow the use-def chains to form a transitive closure of all the
    // instructions that the originally seeded instructions depend on.
    SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end());
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      // Insert instructions from the loop that we depend on.
      for (Value *V : I->operand_values()) {
        auto *I = dyn_cast<Instruction>(V);
        if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second)
          Worklist.push_back(I);
      }
    }
  }

  /// Clones the original loop.
  ///
  /// Updates LoopInfo and DominatorTree using the information that block \p
  /// LoopDomBB dominates the loop.
  Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB,
                               unsigned Index, LoopInfo *LI,
                               DominatorTree *DT) {
    ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop,
                                          VMap, Twine(".ldist") + Twine(Index),
                                          LI, DT, ClonedLoopBlocks);
    return ClonedLoop;
  }

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  const Loop *getClonedLoop() const { return ClonedLoop; }

  /// Returns the loop where this partition ends up after distribution.
  /// If this partition is mapped to the original loop then the original loop
  /// is returned.
  Loop *getDistributedLoop() const {
    return ClonedLoop ? ClonedLoop : OrigLoop;
  }

  /// The VMap that is populated by cloning and then used in
  /// remapInstructionsInBlocks to remap the cloned instructions.
  ValueToValueMapTy &getVMap() { return VMap; }

  /// Remaps the cloned instructions using VMap.
  void remapInstructions() {
    remapInstructionsInBlocks(ClonedLoopBlocks, VMap);
  }

  /// Based on the set of instructions selected for this partition,
  /// removes the unnecessary ones.
  void removeUnusedInsts() {
    SmallVector<Instruction *, 8> Unused;

    for (auto *Block : OrigLoop->getBlocks())
      for (auto &Inst : *Block)
        if (!Set.count(&Inst)) {
          Instruction *NewInst = &Inst;
          if (!VMap.empty())
            NewInst = cast<Instruction>(VMap[NewInst]);

          assert(!isa<BranchInst>(NewInst) &&
                 "Branches are marked used early on");
          Unused.push_back(NewInst);
        }

    // Delete the instructions backwards, so that fewer def-use and use-def
    // chains need to be updated.
    for (auto *Inst : reverse(Unused)) {
      if (!Inst->use_empty())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      Inst->eraseFromParent();
    }
  }

  void print() const {
    if (DepCycle)
      dbgs() << "  (cycle)\n";
    for (auto *I : Set)
      // Prefix with the block name.
      dbgs() << "  " << I->getParent()->getName() << ":" << *I << "\n";
  }

  void printBlocks() const {
    for (auto *BB : getDistributedLoop()->getBlocks())
      dbgs() << *BB;
  }

private:
  /// Instructions from OrigLoop selected for this partition.
  InstructionSet Set;

  /// Whether this partition contains a dependence cycle.
  bool DepCycle;

  /// The original loop.
  Loop *OrigLoop;

  /// The cloned loop. If this partition is mapped to the original loop,
  /// this is null.
  Loop *ClonedLoop = nullptr;

  /// The blocks of ClonedLoop including the preheader. If this
  /// partition is mapped to the original loop, this is empty.
  SmallVector<BasicBlock *, 8> ClonedLoopBlocks;

  /// This gets populated once the set of instructions has been
  /// finalized. If this partition is mapped to the original loop, this is not
  /// set.
  ValueToValueMapTy VMap;
};

/// Holds the set of Partitions. It populates them, merges them and then
/// clones the loops.
class InstPartitionContainer {
  using InstToPartitionIdT = DenseMap<Instruction *, int>;

public:
  InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT)
      : L(L), LI(LI), DT(DT) {}

  /// Returns the number of partitions.
  unsigned getSize() const { return PartitionContainer.size(); }

  /// Adds \p Inst into the current partition if that is marked to
  /// contain cycles. Otherwise starts a new partition for it.
  void addToCyclicPartition(Instruction *Inst) {
    // If the current partition is non-cyclic, start a new one.
    if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle())
      PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true);
    else
      PartitionContainer.back().add(Inst);
  }

  /// Adds \p Inst into a partition that is not marked to contain
  /// dependence cycles.
  ///
  // Initially we isolate memory instructions into as many partitions as
  // possible, then later we may merge them back together.
  void addToNewNonCyclicPartition(Instruction *Inst) {
    PartitionContainer.emplace_back(Inst, L);
  }

  /// Merges adjacent non-cyclic partitions.
  ///
  /// The idea is that we currently only want to isolate the non-vectorizable
  /// partition. We could later allow more distribution among these partitions
  /// too.
  void mergeAdjacentNonCyclic() {
    mergeAdjacentPartitionsIf(
        [](const InstPartition *P) { return !P->hasDepCycle(); });
  }

  /// If a partition contains only conditional stores, we won't vectorize
  /// it. Try to merge it with a previous cyclic partition.
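  ///
  /// For instance (an illustrative sketch, not from the original comment), a
  /// partition whose only store executes under a condition, as in
  /// "if (c) A[i] = v;", requires predication to vectorize, so distributing
  /// it into its own loop is not expected to help (this heuristic can be
  /// overridden with -loop-distribute-non-if-convertible).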
  void mergeNonIfConvertible() {
    mergeAdjacentPartitionsIf([&](const InstPartition *Partition) {
      if (Partition->hasDepCycle())
        return true;

      // Now, check if all stores are conditional in this partition.
      bool SeenStore = false;

      for (auto *Inst : *Partition)
        if (isa<StoreInst>(Inst)) {
          SeenStore = true;
          if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT))
            return false;
        }
      return SeenStore;
    });
  }

  /// Merges the partitions according to various heuristics.
  void mergeBeforePopulating() {
    mergeAdjacentNonCyclic();
    if (!DistributeNonIfConvertible)
      mergeNonIfConvertible();
  }

  /// Merges partitions in order to ensure that no loads are duplicated.
  ///
  /// We can't duplicate loads because that could potentially reorder them.
  /// LoopAccessAnalysis provides dependency information with the context that
  /// the order of memory operations is preserved.
  ///
  /// Returns true if any partitions were merged.
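  ///
  /// For example (an illustrative sketch, not from the original comment), if
  /// partitions P0 and P2 both use a load of A[i] and P1 sits between them,
  /// cloning the load into both P0 and P2 would make the second copy execute
  /// after P1's accesses, violating the order LoopAccessAnalysis assumed.
  /// Instead, P0, P1 and P2 are all merged.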
  bool mergeToAvoidDuplicatedLoads() {
    using LoadToPartitionT = DenseMap<Instruction *, InstPartition *>;
    using ToBeMergedT = EquivalenceClasses<InstPartition *>;

    LoadToPartitionT LoadToPartition;
    ToBeMergedT ToBeMerged;

    // Step through the partitions and create equivalence between partitions
    // that contain the same load. Also put partitions in between them in the
    // same equivalence class to avoid reordering of memory operations.
    for (PartitionContainerT::iterator I = PartitionContainer.begin(),
                                       E = PartitionContainer.end();
         I != E; ++I) {
      auto *PartI = &*I;

      // If a load occurs in two partitions PartI and PartJ, merge all
      // partitions (PartI, PartJ] into PartI.
      for (Instruction *Inst : *PartI)
        if (isa<LoadInst>(Inst)) {
          bool NewElt;
          LoadToPartitionT::iterator LoadToPart;

          std::tie(LoadToPart, NewElt) =
              LoadToPartition.insert(std::make_pair(Inst, PartI));
          if (!NewElt) {
            LLVM_DEBUG(dbgs()
                       << "Merging partitions due to this load in multiple "
                       << "partitions: " << PartI << ", " << LoadToPart->second
                       << "\n"
                       << *Inst << "\n");

            auto PartJ = I;
            do {
              --PartJ;
              ToBeMerged.unionSets(PartI, &*PartJ);
            } while (&*PartJ != LoadToPart->second);
          }
        }
    }
    if (ToBeMerged.empty())
      return false;

    // Merge the members of an equivalence class into its class leader. This
    // makes the members empty.
    for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end();
         I != E; ++I) {
      if (!I->isLeader())
        continue;

      auto PartI = I->getData();
      for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)),
                                   ToBeMerged.member_end())) {
        PartJ->moveTo(*PartI);
      }
    }

    // Remove the empty partitions.
    PartitionContainer.remove_if(
        [](const InstPartition &P) { return P.empty(); });

    return true;
  }

  /// Sets up the mapping from instructions to partitions. If the
  /// instruction is duplicated across multiple partitions, set the entry to -1.
  void setupPartitionIdOnInstructions() {
    int PartitionID = 0;
    for (const auto &Partition : PartitionContainer) {
      for (Instruction *Inst : Partition) {
        bool NewElt;
        InstToPartitionIdT::iterator Iter;

        std::tie(Iter, NewElt) =
            InstToPartitionId.insert(std::make_pair(Inst, PartitionID));
        if (!NewElt)
          Iter->second = -1;
      }
      ++PartitionID;
    }
  }

  /// Populates each partition with everything that its seeding
  /// instructions require.
  void populateUsedSet() {
    for (auto &P : PartitionContainer)
      P.populateUsedSet();
  }

  /// This performs the main chunk of the work of cloning the loops for
  /// the partitions.
  void cloneLoops() {
    BasicBlock *OrigPH = L->getLoopPreheader();
    // At this point the predecessor of the preheader is either the memcheck
    // block or the top part of the original preheader.
    BasicBlock *Pred = OrigPH->getSinglePredecessor();
    assert(Pred && "Preheader does not have a single predecessor");
    BasicBlock *ExitBlock = L->getExitBlock();
    assert(ExitBlock && "No single exit block");
    Loop *NewLoop;

    assert(!PartitionContainer.empty() && "at least two partitions expected");
    // We're cloning the preheader along with the loop so we already made sure
    // it was empty.
    assert(&*OrigPH->begin() == OrigPH->getTerminator() &&
           "preheader not empty");

    // Preserve the original loop ID for use after the transformation.
    MDNode *OrigLoopID = L->getLoopID();

    // Create a loop for each partition except the last. Clone the original
    // loop before PH along with adding a preheader for the cloned loop. Then
    // update PH to point to the newly added preheader.
    BasicBlock *TopPH = OrigPH;
    unsigned Index = getSize() - 1;
    for (auto I = std::next(PartitionContainer.rbegin()),
              E = PartitionContainer.rend();
         I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) {
      auto *Part = &*I;

      NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT);

      Part->getVMap()[ExitBlock] = TopPH;
      Part->remapInstructions();
      setNewLoopID(OrigLoopID, Part);
    }
    Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH);

    // Also set a new loop ID for the last loop.
    setNewLoopID(OrigLoopID, &PartitionContainer.back());

    // Now go in forward order and update the immediate dominator for the
    // preheaders with the exiting block of the previous loop. Dominance
    // within the loop is updated in cloneLoopWithPreheader.
    for (auto Curr = PartitionContainer.cbegin(),
              Next = std::next(PartitionContainer.cbegin()),
              E = PartitionContainer.cend();
         Next != E; ++Curr, ++Next)
      DT->changeImmediateDominator(
          Next->getDistributedLoop()->getLoopPreheader(),
          Curr->getDistributedLoop()->getExitingBlock());
  }

  /// Removes the dead instructions from the cloned loops.
  void removeUnusedInsts() {
    for (auto &Partition : PartitionContainer)
      Partition.removeUnusedInsts();
  }

  /// For each memory pointer, it computes the partitionId the pointer is
  /// used in.
  ///
  /// This returns an array of int where the I-th entry corresponds to the I-th
  /// entry in LAI.getRuntimePointerChecking(). If the pointer is used in
  /// multiple partitions its entry is set to -1.
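  ///
  /// For example (an illustrative sketch, not from the original comment), with
  /// three checked pointers %a, %b and %c, where %a is accessed only in
  /// partition 0, %b only in partition 1, and %c in both, the result would be
  /// {0, 1, -1}.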
  SmallVector<int, 8>
  computePartitionSetForPointers(const LoopAccessInfo &LAI) {
    const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking();

    unsigned N = RtPtrCheck->Pointers.size();
    SmallVector<int, 8> PtrToPartitions(N);
    for (unsigned I = 0; I < N; ++I) {
      Value *Ptr = RtPtrCheck->Pointers[I].PointerValue;
      auto Instructions =
          LAI.getInstructionsForAccess(Ptr, RtPtrCheck->Pointers[I].IsWritePtr);

      int &Partition = PtrToPartitions[I];
      // First set it to uninitialized.
      Partition = -2;
      for (Instruction *Inst : Instructions) {
        // Note that this could be -1 if Inst is duplicated across multiple
        // partitions.
        int ThisPartition = this->InstToPartitionId[Inst];
        if (Partition == -2)
          Partition = ThisPartition;
        // -1 means belonging to multiple partitions.
        else if (Partition == -1)
          break;
        else if (Partition != (int)ThisPartition)
          Partition = -1;
      }
      assert(Partition != -2 && "Pointer not belonging to any partition");
    }

    return PtrToPartitions;
  }

  void print(raw_ostream &OS) const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      OS << "Partition " << Index++ << " (" << &P << "):\n";
      P.print();
    }
  }

  void dump() const { print(dbgs()); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const InstPartitionContainer &Partitions) {
    Partitions.print(OS);
    return OS;
  }
#endif

  void printBlocks() const {
    unsigned Index = 0;
    for (const auto &P : PartitionContainer) {
      dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n";
      P.printBlocks();
    }
  }

private:
  using PartitionContainerT = std::list<InstPartition>;

  /// List of partitions.
  PartitionContainerT PartitionContainer;

  /// Mapping from Instruction to partition Id. If the instruction
  /// belongs to multiple partitions the entry contains -1.
  InstToPartitionIdT InstToPartitionId;

  Loop *L;
  LoopInfo *LI;
  DominatorTree *DT;

  /// The control structure to merge adjacent partitions if both satisfy
  /// the \p Predicate.
  template <class UnaryPredicate>
  void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) {
    InstPartition *PrevMatch = nullptr;
    for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) {
      auto DoesMatch = Predicate(&*I);
      if (PrevMatch == nullptr && DoesMatch) {
        PrevMatch = &*I;
        ++I;
      } else if (PrevMatch != nullptr && DoesMatch) {
        I->moveTo(*PrevMatch);
        I = PartitionContainer.erase(I);
      } else {
        PrevMatch = nullptr;
        ++I;
      }
    }
  }

  /// Assigns a new LoopID to the partition's distributed loop.
  void setNewLoopID(MDNode *OrigLoopID, InstPartition *Part) {
    Optional<MDNode *> PartitionID = makeFollowupLoopID(
        OrigLoopID,
        {LLVMLoopDistributeFollowupAll,
         Part->hasDepCycle() ? LLVMLoopDistributeFollowupSequential
                             : LLVMLoopDistributeFollowupCoincident});
    if (PartitionID.hasValue()) {
      Loop *NewLoop = Part->getDistributedLoop();
      NewLoop->setLoopID(PartitionID.getValue());
    }
  }
};
/// For each memory instruction, this class maintains the difference between
/// the number of unsafe dependences that start at this instruction and the
/// number that end here.
///
/// By traversing the memory instructions in program order and accumulating
/// this number, we know whether any unsafe dependence crosses over a given
/// program point.
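///
/// For example (an illustrative sketch, not from the original comment), with
/// a single backward dependence from I1 to I3:
///
///          StartOrEnd   running sum
///   I1        +1             1       <- dependence starts
///   I2         0             1       <- crossed by the dependence
///   I3        -1             0       <- dependence ends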
class MemoryInstructionDependences {
  using Dependence = MemoryDepChecker::Dependence;

public:
  struct Entry {
    Instruction *Inst;
    unsigned NumUnsafeDependencesStartOrEnd = 0;

    Entry(Instruction *Inst) : Inst(Inst) {}
  };

  using AccessesType = SmallVector<Entry, 8>;

  AccessesType::const_iterator begin() const { return Accesses.begin(); }
  AccessesType::const_iterator end() const { return Accesses.end(); }

  MemoryInstructionDependences(
      const SmallVectorImpl<Instruction *> &Instructions,
      const SmallVectorImpl<Dependence> &Dependences) {
    Accesses.append(Instructions.begin(), Instructions.end());

    LLVM_DEBUG(dbgs() << "Backward dependences:\n");
    for (auto &Dep : Dependences)
      if (Dep.isPossiblyBackward()) {
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd;
        --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd;

        LLVM_DEBUG(Dep.print(dbgs(), 2, Instructions));
      }
  }

private:
  AccessesType Accesses;
};

/// The actual class performing the per-loop work.
class LoopDistributeForLoop {
public:
  LoopDistributeForLoop(Loop *L, Function *F, LoopInfo *LI, DominatorTree *DT,
                        ScalarEvolution *SE, OptimizationRemarkEmitter *ORE)
      : L(L), F(F), LI(LI), DT(DT), SE(SE), ORE(ORE) {
    setForced();
  }

  /// Try to distribute an inner-most loop.
  bool processLoop(std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
    assert(L->empty() && "Only process inner loops.");

    LLVM_DEBUG(dbgs() << "\nLDist: In \""
                      << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    if (!L->getExitBlock())
      return fail("MultipleExitBlocks", "multiple exit blocks");
    if (!L->isLoopSimplifyForm())
      return fail("NotLoopSimplifyForm",
                  "loop is not in loop-simplify form");

    BasicBlock *PH = L->getLoopPreheader();

    // LAA will check that we only have a single exiting block.
    LAI = &GetLAA(*L);

    // Currently, we only distribute to isolate the part of the loop with
    // dependence cycles to enable partial vectorization.
    if (LAI->canVectorizeMemory())
      return fail("MemOpsCanBeVectorized",
                  "memory operations are safe for vectorization");

    auto *Dependences = LAI->getDepChecker().getDependences();
    if (!Dependences || Dependences->empty())
      return fail("NoUnsafeDeps", "no unsafe dependences to isolate");

    InstPartitionContainer Partitions(L, LI, DT);

    // First, go through the memory operations and assign them to consecutive
    // partitions (the order of partitions follows program order). Put those
    // with unsafe dependences into a "cyclic" partition, otherwise put each
    // store in its own "non-cyclic" partition (we'll merge these later).
    //
    // Note that a memory operation (e.g. Load2 below) at a program point that
    // has an unsafe dependence (Store3->Load1) spanning over it must be
    // included in the same cyclic partition as the dependent operations. This
    // is to preserve the original program order after distribution. E.g.:
    //
    //           NumUnsafeDependencesStartOrEnd  NumUnsafeDependencesActive
    //  Load1 -.              1                           0->1
    //  Load2  | /Unsafe/     0                           1
    //  Store3 -'            -1                           1->0
    //  Load4                 0                           0
    //
    // NumUnsafeDependencesActive > 0 indicates this situation and in this case
    // we just keep assigning to the same cyclic partition until
    // NumUnsafeDependencesActive reaches 0.
    const MemoryDepChecker &DepChecker = LAI->getDepChecker();
    MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(),
                                     *Dependences);

    int NumUnsafeDependencesActive = 0;
    for (auto &InstDep : MID) {
      Instruction *I = InstDep.Inst;
      // We update NumUnsafeDependencesActive post-instruction, so catch the
      // start of a dependence directly via NumUnsafeDependencesStartOrEnd.
      if (NumUnsafeDependencesActive ||
          InstDep.NumUnsafeDependencesStartOrEnd > 0)
        Partitions.addToCyclicPartition(I);
      else
        Partitions.addToNewNonCyclicPartition(I);
      NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd;
      assert(NumUnsafeDependencesActive >= 0 &&
             "Negative number of dependences active");
    }

    // Add partitions for values used outside. These partitions can be out of
    // order from the original program order. This is OK because if the
    // partition uses a load we will merge this partition with the original
    // partition of the load that we set up in the previous loop (see
    // mergeToAvoidDuplicatedLoads).
    auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L);
    for (auto *Inst : DefsUsedOutside)
      Partitions.addToNewNonCyclicPartition(Inst);

    LLVM_DEBUG(dbgs() << "Seeded partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Run the merge heuristics: Merge non-cyclic adjacent partitions since we
    // should be able to vectorize these together.
    Partitions.mergeBeforePopulating();
    LLVM_DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions);
    if (Partitions.getSize() < 2)
      return fail("CantIsolateUnsafeDeps",
                  "cannot isolate unsafe dependencies");

    // Now, populate the partitions with non-memory operations.
    Partitions.populateUsedSet();
    LLVM_DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions);

    // In order to preserve original lexical order for loads, keep them in the
    // partition that we set up in the MemoryInstructionDependences loop.
    if (Partitions.mergeToAvoidDuplicatedLoads()) {
      LLVM_DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
                        << Partitions);
      if (Partitions.getSize() < 2)
        return fail("CantIsolateUnsafeDeps",
                    "cannot isolate unsafe dependencies");
    }

    // Don't distribute the loop if we need too many SCEV run-time checks.
    const SCEVUnionPredicate &Pred = LAI->getPSE().getUnionPredicate();
    if (Pred.getComplexity() > (IsForced.getValueOr(false)
                                    ? PragmaDistributeSCEVCheckThreshold
                                    : DistributeSCEVCheckThreshold))
      return fail("TooManySCEVRuntimeChecks",
                  "too many SCEV run-time checks needed.\n");

    if (!IsForced.getValueOr(false) && hasDisableAllTransformsHint(L))
      return fail("HeuristicDisabled", "distribution heuristic disabled");

    LLVM_DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
    // We're done forming the partitions; set up the reverse mapping from
    // instructions to partitions.
    Partitions.setupPartitionIdOnInstructions();

    // To keep things simple have an empty preheader before we version or clone
    // the loop. (Also split if this has no predecessor, i.e. entry, because we
    // rely on PH having a predecessor.)
    if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
      SplitBlock(PH, PH->getTerminator(), DT, LI);

    // If we need run-time checks, version the loop now.
    auto PtrToPartition = Partitions.computePartitionSetForPointers(*LAI);
    const auto *RtPtrChecking = LAI->getRuntimePointerChecking();
    const auto &AllChecks = RtPtrChecking->getChecks();
    auto Checks = includeOnlyCrossPartitionChecks(AllChecks, PtrToPartition,
                                                  RtPtrChecking);

    if (!Pred.isAlwaysTrue() || !Checks.empty()) {
      MDNode *OrigLoopID = L->getLoopID();

      LLVM_DEBUG(dbgs() << "\nPointers:\n");
      LLVM_DEBUG(LAI->getRuntimePointerChecking()->printChecks(dbgs(), Checks));
      LoopVersioning LVer(*LAI, L, LI, DT, SE, false);
      LVer.setAliasChecks(std::move(Checks));
      LVer.setSCEVChecks(LAI->getPSE().getUnionPredicate());
      LVer.versionLoop(DefsUsedOutside);
      LVer.annotateLoopWithNoAlias();

      // The unversioned loop will not be changed, so we inherit all attributes
      // from the original loop, but remove the loop distribution metadata to
      // avoid distributing it again.
      MDNode *UnversionedLoopID =
          makeFollowupLoopID(OrigLoopID,
                             {LLVMLoopDistributeFollowupAll,
                              LLVMLoopDistributeFollowupFallback},
                             "llvm.loop.distribute.", true)
              .getValue();
      LVer.getNonVersionedLoop()->setLoopID(UnversionedLoopID);
    }

    // Create identical copies of the original loop for each partition and hook
    // them up sequentially.
    Partitions.cloneLoops();

    // Now, remove from each loop the instructions that don't belong to its
    // partition.
    Partitions.removeUnusedInsts();
    LLVM_DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
    LLVM_DEBUG(Partitions.printBlocks());

    if (LDistVerify) {
      LI->verify(*DT);
      assert(DT->verify(DominatorTree::VerificationLevel::Fast));
    }

    ++NumLoopsDistributed;
    // Report the success.
    ORE->emit([&]() {
      return OptimizationRemark(LDIST_NAME, "Distribute", L->getStartLoc(),
                                L->getHeader())
             << "distributed loop";
    });
    return true;
  }

  /// Provide diagnostics, then \return false.
  bool fail(StringRef RemarkName, StringRef Message) {
    LLVMContext &Ctx = F->getContext();
    bool Forced = isForced().getValueOr(false);

    LLVM_DEBUG(dbgs() << "Skipping; " << Message << "\n");

    // With -Rpass-missed, report that distribution failed.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LDIST_NAME, "NotDistributed",
                                      L->getStartLoc(), L->getHeader())
             << "loop not distributed: use -Rpass-analysis=loop-distribute "
                "for more info";
    });

    // With -Rpass-analysis, report why. This is on by default if distribution
    // was requested explicitly.
    ORE->emit(OptimizationRemarkAnalysis(
                  Forced ? OptimizationRemarkAnalysis::AlwaysPrint : LDIST_NAME,
                  RemarkName, L->getStartLoc(), L->getHeader())
              << "loop not distributed: " << Message);

    // Also issue a warning if distribution was requested explicitly but it
    // failed.
    if (Forced)
      Ctx.diagnose(DiagnosticInfoOptimizationFailure(
          *F, L->getStartLoc(), "loop not distributed: failed "
                                "explicitly specified loop distribution"));

    return false;
  }

  /// Returns whether distribution was forced to be enabled or disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  const Optional<bool> &isForced() const { return IsForced; }

private:
  /// Filter out checks between pointers from the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers. Partition
  /// number -1 means that the pointer is used in multiple partitions. In this
  /// case we can't safely omit the check.
  SmallVector<RuntimePointerChecking::PointerCheck, 4>
  includeOnlyCrossPartitionChecks(
      const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &AllChecks,
      const SmallVectorImpl<int> &PtrToPartition,
      const RuntimePointerChecking *RtPtrChecking) {
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerChecking::PointerCheck &Check) {
              for (unsigned PtrIdx1 : Check.first->Members)
                for (unsigned PtrIdx2 : Check.second->Members)
                  // Only include this check if there is a pair of pointers
                  // that require checking and the pointers fall into
                  // separate partitions.
                  //
                  // (Note that we already know at this point that the two
                  // pointer groups need checking but it doesn't follow
                  // that each pair of pointers within the two groups needs
                  // checking as well.
                  //
                  // In other words we don't want to include a check just
                  // because there is a pair of pointers between the two
                  // pointer groups that require checks and a different
                  // pair whose pointers fall into different partitions.)
                  if (RtPtrChecking->needsChecking(PtrIdx1, PtrIdx2) &&
                      !RuntimePointerChecking::arePointersInSamePartition(
                          PtrToPartition, PtrIdx1, PtrIdx2))
                    return true;
              return false;
            });

    return Checks;
  }

  /// Check whether the loop metadata is forcing distribution to be
  /// enabled/disabled.
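  ///
  /// For reference (an illustrative sketch, not from the original comment),
  /// the metadata looked up below appears roughly like this in IR, attached
  /// to the loop latch's branch:
  ///
  ///   br i1 %cond, label %header, label %exit, !llvm.loop !0
  ///   ...
  ///   !0 = distinct !{!0, !1}
  ///   !1 = !{!"llvm.loop.distribute.enable", i1 true}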
  void setForced() {
    Optional<const MDOperand *> Value =
        findStringMetadataForLoop(L, "llvm.loop.distribute.enable");
    if (!Value)
      return;

    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    IsForced = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }

  Loop *L;
  Function *F;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo *LAI = nullptr;
  DominatorTree *DT;
  ScalarEvolution *SE;
  OptimizationRemarkEmitter *ORE;

  /// Indicates whether distribution is forced to be enabled/disabled for
  /// the loop.
  ///
  /// If the optional has a value, it indicates whether distribution was forced
  /// to be enabled (true) or disabled (false). If the optional has no value
  /// distribution was not forced either way.
  Optional<bool> IsForced;
};

} // end anonymous namespace

/// Shared implementation between new and old PMs.
static bool runImpl(Function &F, LoopInfo *LI, DominatorTree *DT,
                    ScalarEvolution *SE, OptimizationRemarkEmitter *ORE,
                    std::function<const LoopAccessInfo &(Loop &)> &GetLAA) {
  // Build up a worklist of inner loops to distribute. This is necessary as the
  // act of distributing a loop creates new loops and can invalidate iterators
  // across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->empty())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    LoopDistributeForLoop LDL(L, &F, LI, DT, SE, ORE);

    // If distribution was forced for the specific loop to be
    // enabled/disabled, follow that. Otherwise use the global flag.
    if (LDL.isForced().getValueOr(EnableLoopDistribute))
      Changed |= LDL.processLoop(GetLAA);
  }

  return Changed;
}

namespace {

/// The pass class.
class LoopDistributeLegacy : public FunctionPass {
public:
  static char ID;

  LoopDistributeLegacy() : FunctionPass(ID) {
    initializeLoopDistributeLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return runImpl(F, LI, DT, SE, ORE, GetLAA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

PreservedAnalyses LoopDistributePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  // We don't directly need these analyses but they're required for loop
  // analyses so provide them below.
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };

  bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}

char LoopDistributeLegacy::ID;

static const char ldist_name[] = "Loop Distribution";

INITIALIZE_PASS_BEGIN(LoopDistributeLegacy, LDIST_NAME, ldist_name, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopDistributeLegacy, LDIST_NAME, ldist_name, false, false)

FunctionPass *llvm::createLoopDistributePass() {
  return new LoopDistributeLegacy();
}
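
// A minimal way to exercise this pass from the command line (an illustrative
// sketch, not from the original source). Note that, per runImpl above, a loop
// is only processed when -enable-loop-distribute is given or when the loop
// carries llvm.loop.distribute.enable metadata:
//
//   opt -loop-distribute -enable-loop-distribute -S input.ll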