//===- ScopHelper.cpp - Some Helper Functions for Scop --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow speculation on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Ensures that there is just one predecessor to the entry node from outside
// the region.
// The identity of the region entry node is preserved.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create a single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of preceding regions must be changed to NewEntering.
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors use NewEntering as entry; there might be edges to
      // it.
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //  EnteringBB     //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \    /   //
  //         ....    //
}

// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the exit of the region
    // itself.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}

void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}

// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo.
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //   /   \    //

  return NewBlock;
}
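
/// Split \p EntryBlock right after its leading alloca instructions, so that
/// afterwards it contains only allocas and passes have a dedicated place to
/// insert new ones (a summary of the behavior below; see also the header).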
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
                                     LoopInfo *LI, RegionInfo *RI) {
  // Find the first non-alloca instruction. Every basic block has a non-alloca
  // instruction, as every well-formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, &*I, DT, LI, RI);
}

void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
}

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction; it will just reuse it if it is referenced as a SCEVUnknown.
/// However, we want to generate new code if the instruction is in the analyzed
/// region and we generate code outside/in front of that region. Hence, we
/// generate the code for the SDiv/SRem operands in front of the analyzed
/// region and then create a new SDiv/SRem operation there too.
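///
/// As an illustrative example (the value names are made up), assume the
/// analyzed region contains '%d = sdiv i64 %a, %b' and %d is referenced as a
/// SCEVUnknown in an expression we expand in front of the region. Instead of
/// reusing %d, which is unavailable there, we expand copies of %a and %b
/// before the region and emit a fresh sdiv of those copies, with the divisor
/// guarded to be non-zero where necessary.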
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap), RTCBB(RTCBB) {}

  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region, we immediately fall back to the
    // SCEVExpander; otherwise we stop at all unknowns in the SCEV and, if
    // needed, replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

private:
  SCEVExpander Expander;
  ScalarEvolution &SE;
  const char *Name;
  const Region &R;
  ValueMapT *VMap;
  BasicBlock *RTCBB;

  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {
    // If a value mapping was given, check whether the underlying value is
    // remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different, its SCEV representation
      // might not be. Check this before recursing to avoid endless recursion.
      if (E != NewE)
        return visit(NewE);
    }

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};
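
/// Expand the SCEV expression \p E into a value at instruction \p IP. This is
/// a thin wrapper around ScopExpander (see the comment above): values defined
/// inside the region of \p S are recomputed at the insertion point rather
/// than reused.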
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}
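
/// Heuristically decide whether \p BB is an 'error block', i.e., a block whose
/// execution is presumed to be a rare event: one that is not always executed
/// and, e.g., ends in 'unreachable' or calls a function that may access memory
/// or may not return.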
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  if (!PollyAllowErrorBlocks)
    return false;

  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution cannot be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

  // FIXME: This is a simple heuristic to determine if the load is executed
  //        in a conditional. However, we actually would need the control
  //        condition, i.e., the post dominance frontier. Alternatively we
  //        could walk up the dominance tree until we find a block that is
  //        not post dominated by the load and check if it is a conditional
  //        or a loop header.
  auto *DTNode = DT.getNode(&BB);
  if (!DTNode)
    return false;

  DTNode = DTNode->getIDom();

  if (!DTNode)
    return false;

  auto *IDomBB = DTNode->getBlock();
  if (LI.isLoopHeader(IDomBB))
    return false;

  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isIgnoredIntrinsic(CI))
        continue;

      // memset, memcpy and memmove are modeled intrinsics.
      if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI))
        continue;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}
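
/// Return the condition under which the successors of \p TI are branched to:
/// the condition of a conditional branch, an 'i1 true' constant for an
/// unconditional branch, the switched-over value for a switch, and nullptr
/// for any other terminator.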
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}
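
/// Check whether \p LInst can be hoisted in front of \p R: the address it
/// loads from must be invariant in all loops of \p R that contain the load,
/// and no instruction in \p R that uses the same pointer and may write to
/// memory may dominate the load or be executed on every path through the
/// region.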
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    auto &BB = *UserI->getParent();
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    bool DominatesAllPredecessors = true;
    if (R.isTopLevelRegion()) {
      for (BasicBlock &I : *R.getEntry()->getParent())
        if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
          DominatesAllPredecessors = false;
    } else {
      for (auto Pred : predecessors(R.getExit()))
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
          DominatesAllPredecessors = false;
    }

    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}
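
/// Return true if \p V is a call to an intrinsic that Polly can safely ignore
/// while modeling a Scop: lifetime and invariant markers, annotations,
/// assumptions, and debug info intrinsics.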
bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}
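
/// Return true if \p V can be recomputed from its SCEV expression at code
/// generation time, i.e., its SCEV at \p Scope is computable and does not
/// depend on scalar values defined inside the region of \p S, except for the
/// required invariant loads.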
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
        return true;

  return false;
}
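
/// Return the block in which the value passed through \p U is effectively
/// used: the corresponding incoming block for a PHI operand, the parent block
/// for any other instruction, and nullptr if the user is not an instruction.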
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}
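
/// Derive the subscript expressions and dimension sizes of an array access
/// from the operands of \p GEP. As an illustrative example, for
///
///   getelementptr [20 x [10 x double]], [20 x [10 x double]]* %A,
///                 i64 0, i64 %i, i64 %j
///
/// the returned subscripts are {%i, %j} and the sizes {10}: the leading
/// constant-zero index drops the outermost dimension, whose size is not
/// needed to compute the access offset. Both vectors are returned empty if
/// the operands do not match this array-of-arrays pattern.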
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}
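
/// Return the first surrounding loop of \p L (respectively of \p BB in the
/// overload below) that is not contained in \p BoxedLoops; boxed loops belong
/// to non-affine subregions and are not modeled as loops of the Scop.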
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}

llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *L = LI.getLoopFor(BB);
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
}