//===- ScopHelper.cpp - Some Helper Functions for Scop. ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

static cl::list<std::string> DebugFunctions(
    "polly-debug-func",
    cl::desc("Allow calls to the specified functions in SCoPs even if their "
             "side-effects are unknown. This can be used to do debug output in "
             "Polly-transformed code."),
    cl::Hidden, cl::CommaSeparated, cl::cat(PollyCategory));
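// A usage sketch (the function names here are only examples): passing
//   -polly-debug-func=printf,my_trace
// to the pass pipeline (e.g. via opt) allows calls to printf and my_trace
// inside detected SCoPs even though their side effects are unknown.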

// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                        EnteringBB       //
  //                        |    \------>    //
  //   \   /                |                //
  //    Entry <--\          Entry <--\       //
  //   /   \    /          /   \     /       //
  //        ....                ....         //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of preceding regions must be changed to NewEntering.
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors use EnteringBB as entry; there might be edges to it
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //  EnteringBB     //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //   /   \     /   //
  //        ....     //
}

// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]    otherBB  //
    //        \  |  ________/         //
    //         \ | /                  //
    //           BB                   //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    //  Preds[0] Preds[1]    otherBB  //
    //         \  /         /         //
    //   BB.region_exiting /          //
    //                 \  /           //
    //                  BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the exit of the region itself.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //      \   /              //
  //    ExitingBB   _____/   //
  //            \  /         //
  //           ExitBB        //
  //           /    \        //
}

void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}
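
// A minimal usage sketch for the helper above (assuming R is a non-top-level
// Region and DT, LI and RI point to up-to-date analyses for its function):
//
//   polly::simplifyRegion(R, DT, LI, RI);
//   assert(R->isSimple());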

// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //   /   \    //

  return NewBlock;
}

void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
                                     LoopInfo *LI, RegionInfo *RI) {
  // Find first non-alloca instruction. Every basic block has a non-alloca
  // instruction, as every well formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, &*I, DT, LI, RI);
}

void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
}

void polly::recordAssumption(polly::RecordedAssumptionsTy *RecordedAssumptions,
                             polly::AssumptionKind Kind, isl::set Set,
                             DebugLoc Loc, polly::AssumptionSign Sign,
                             BasicBlock *BB, bool RTC) {
  assert((Set.is_params() || BB) &&
         "Assumptions without a basic block must be parameter sets");
  if (RecordedAssumptions)
    RecordedAssumptions->push_back({Kind, Sign, Set, Loc, BB, RTC});
}
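
// Recording an assumption, as a sketch (ALIASING and AS_ASSUMPTION are the
// enum values declared in polly/ScopInfo.h; `Assumptions` and `Dom` are
// hypothetical):
//
//   RecordedAssumptionsTy Assumptions;
//   isl::set Dom = ...; // a parameter-only set, since no basic block is given
//   recordAssumption(&Assumptions, ALIASING, Dom, DebugLoc(), AS_ASSUMPTION,
//                    nullptr, true);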

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction but just use it, if it is referenced as a SCEVUnknown. However,
/// we want to generate new code if the instruction is inside the analyzed
/// region and the code is generated outside of/in front of that region. Hence,
/// we generate the code for the SDiv/SRem operands in front of the analyzed
/// region and then create a new SDiv/SRem operation there too.
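///
/// For illustration (a sketch, not taken from a test case): for a SCEV that
/// refers to `%q = sdiv i64 %a, %b` defined inside the analyzed region, the
/// expander emits copies of the operands in front of the region and creates a
/// new sdiv there, with the divisor guarded as umax(%b, 1) unless it is known
/// to be non-zero.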
struct ScopExpander final : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SE, DL, Name, /*PreserveLCSSA=*/false), SE(SE), Name(Name),
        R(R), VMap(VMap), RTCBB(RTCBB) {}

  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

  const SCEV *visit(const SCEV *E) {
    // Cache the expansion results for intermediate SCEV expressions. A SCEV
    // expression can refer to an operand multiple times (e.g. "x*x"), so
    // a naive visitor takes exponential time.
    if (SCEVCache.count(E))
      return SCEVCache[E];
    const SCEV *Result = SCEVVisitor::visit(E);
    SCEVCache[E] = Result;
    return Result;
  }

private:
  SCEVExpander Expander;
  ScalarEvolution &SE;
  const char *Name;
  const Region &R;
  ValueMapT *VMap;
  BasicBlock *RTCBB;
  DenseMap<const SCEV *, const SCEV *> SCEVCache;

  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given, check whether the underlying value is
    // remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different, its SCEV representation
      // might not be. Check this before recursing.
      if (E != NewE)
        return visit(NewE);
    }

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

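    // If the divisor is not known to be non-zero, clamp it to at least 1 so
    // the division we re-create outside the region can never trap.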
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *E) {
    return SE.getPtrToIntExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitUMinExpr(const SCEVUMinExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMinExpr(NewOps);
  }
  const SCEV *visitSMinExpr(const SCEVSMinExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMinExpr(NewOps);
  }
  const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMinExpr(NewOps, /*Sequential=*/true);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};

Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}

Value *polly::getConditionFromTerminator(Instruction *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}

Loop *polly::getLoopSurroundingScop(Scop &S, LoopInfo &LI) {
  // Start with the smallest loop containing the entry and expand that
  // loop until it contains all blocks in the region. If there is a loop
  // containing all blocks in the region, check if it is itself contained
  // and, if so, take the parent loop, as that is the smallest loop containing
  // the region but not contained by it.
  Loop *L = LI.getLoopFor(S.getEntry());
  while (L) {
    bool AllContained = true;
    for (auto *BB : S.blocks())
      AllContained &= L->contains(BB);
    if (AllContained)
      break;
    L = L->getParentLoop();
  }

  return L ? (S.contains(L) ? L->getParentLoop() : L) : nullptr;
}

unsigned polly::getNumBlocksInLoop(Loop *L) {
  unsigned NumBlocks = L->getNumBlocks();
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);

  for (auto ExitBlock : ExitBlocks) {
    if (isa<UnreachableInst>(ExitBlock->getTerminator()))
      NumBlocks++;
  }
  return NumBlocks;
}

unsigned polly::getNumBlocksInRegionNode(RegionNode *RN) {
  if (!RN->isSubRegion())
    return 1;

  Region *R = RN->getNodeAs<Region>();
  return std::distance(R->block_begin(), R->block_end());
}

Loop *polly::getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
  if (!RN->isSubRegion()) {
    BasicBlock *BB = RN->getNodeAs<BasicBlock>();
    Loop *L = LI.getLoopFor(BB);

    // Unreachable statements are not considered to belong to an LLVM loop, as
    // they are not part of an actual loop in the control flow graph.
    // Nevertheless, we handle certain unreachable statements that are common
    // when modeling run-time bounds checks as being part of the loop to be
    // able to model them and to later eliminate the run-time bounds checks.
    //
    // Specifically, for basic blocks that terminate in an unreachable and
    // where the immediate predecessor is part of a loop, we assume these
    // basic blocks belong to the loop the predecessor belongs to. This
    // allows us to model the following code.
    //
    // for (i = 0; i < N; i++) {
    //   if (i > 1024)
    //     abort();            <- this abort might be translated to an
    //                            unreachable
    //
    //   A[i] = ...
    // }
    if (!L && isa<UnreachableInst>(BB->getTerminator()) && BB->getPrevNode())
      L = LI.getLoopFor(BB->getPrevNode());
    return L;
  }

  Region *NonAffineSubRegion = RN->getNodeAs<Region>();
  Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
  while (L && NonAffineSubRegion->contains(L))
    L = L->getParentLoop();
  return L;
}

static bool hasVariantIndex(GetElementPtrInst *Gep, Loop *L, Region &R,
                            ScalarEvolution &SE) {
  for (const Use &Val : llvm::drop_begin(Gep->operands(), 1)) {
    const SCEV *PtrSCEV = SE.getSCEVAtScope(Val, L);
    Loop *OuterLoop = R.outermostLoopInRegion(L);
    if (!SE.isLoopInvariant(PtrSCEV, OuterLoop))
      return true;
  }
  return false;
}

bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT,
                            const InvariantLoadsSetTy &KnownInvariantLoads) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();

  // A LoadInst is hoistable if the address it is loading from is also
  // invariant; here that means the address is itself loaded by another
  // invariant load. (Whether that address is also never written to has to be
  // checked separately.)
  // TODO: This only checks for the LoadInst->GetElementPtrInst->LoadInst
  // pattern generated by the Chapel frontend, but in general this applies to
  // any chain of instructions that does not also depend on any induction
  // variable.
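  //
  // Illustrative IR shape of that pattern (a sketch, not from a test case):
  //
  //   %base = load ptr, ptr %desc                  ; a known invariant load
  //   %addr = getelementptr %T, ptr %base, i64 %i  ; %i invariant in the region
  //   %val  = load i32, ptr %addr                  ; hoistable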
  if (auto *GepInst = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (!hasVariantIndex(GepInst, L, R, SE)) {
      if (auto *DecidingLoad =
              dyn_cast<LoadInst>(GepInst->getPointerOperand())) {
        if (KnownInvariantLoads.count(DecidingLoad))
          return true;
      }
    }
  }

  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    auto &BB = *UserI->getParent();
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    bool DominatesAllPredecessors = true;
    if (R.isTopLevelRegion()) {
      for (BasicBlock &I : *R.getEntry()->getParent())
        if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
          DominatesAllPredecessors = false;
    } else {
      for (auto Pred : predecessors(R.getExit()))
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
          DominatesAllPredecessors = false;
    }

    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}

bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
        return true;

  return false;
}

llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}

llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}

llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *L = LI.getLoopFor(BB);
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
}

bool polly::isDebugCall(Instruction *Inst) {
  auto *CI = dyn_cast<CallInst>(Inst);
  if (!CI)
    return false;

  Function *CF = CI->getCalledFunction();
  if (!CF)
    return false;

  return std::find(DebugFunctions.begin(), DebugFunctions.end(),
                   CF->getName()) != DebugFunctions.end();
}

static bool hasDebugCall(BasicBlock *BB) {
  for (Instruction &Inst : *BB) {
    if (isDebugCall(&Inst))
      return true;
  }
  return false;
}

bool polly::hasDebugCall(ScopStmt *Stmt) {
  // Quick skip if no debug functions have been defined.
  if (DebugFunctions.empty())
    return false;

  if (!Stmt)
    return false;

  for (Instruction *Inst : Stmt->getInstructions())
    if (isDebugCall(Inst))
      return true;

  if (Stmt->isRegionStmt()) {
    for (BasicBlock *RBB : Stmt->getRegion()->blocks())
      if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB))
        return true;
  }

  return false;
}

/// Find a property in a LoopID.
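///
/// Loop metadata is a list whose first operand is a self-reference and whose
/// remaining operands are property nodes, e.g. (an illustrative sketch):
///
///   !llvm.loop !0
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.unroll.count", i32 4}
///
/// Looking up "llvm.loop.unroll.count" in !0 returns the node !1.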
static MDNode *findNamedMetadataNode(MDNode *LoopMD, StringRef Name) {
  if (!LoopMD)
    return nullptr;
  for (const MDOperand &X : drop_begin(LoopMD->operands(), 1)) {
    auto *OpNode = dyn_cast<MDNode>(X.get());
    if (!OpNode)
      continue;

    auto *OpName = dyn_cast<MDString>(OpNode->getOperand(0));
    if (!OpName)
      continue;
    if (OpName->getString() == Name)
      return OpNode;
  }
  return nullptr;
}

static Optional<const MDOperand *> findNamedMetadataArg(MDNode *LoopID,
                                                        StringRef Name) {
  MDNode *MD = findNamedMetadataNode(LoopID, Name);
  if (!MD)
    return None;
  switch (MD->getNumOperands()) {
  case 1:
    return nullptr;
  case 2:
    return &MD->getOperand(1);
  default:
    llvm_unreachable("loop metadata has 0 or 1 operand");
  }
}

Optional<Metadata *> polly::findMetadataOperand(MDNode *LoopMD,
                                                StringRef Name) {
  MDNode *MD = findNamedMetadataNode(LoopMD, Name);
  if (!MD)
    return None;
  switch (MD->getNumOperands()) {
  case 1:
    return nullptr;
  case 2:
    return MD->getOperand(1).get();
  default:
    llvm_unreachable("loop metadata must have 0 or 1 operands");
  }
}

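/// Return whether a boolean loop property is set. Both the bare form
/// !{!"name"} and the explicit form !{!"name", i1 true} count as true; an
/// explicit i1 false yields false.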
static Optional<bool> getOptionalBoolLoopAttribute(MDNode *LoopID,
                                                   StringRef Name) {
  MDNode *MD = findNamedMetadataNode(LoopID, Name);
  if (!MD)
    return None;
  switch (MD->getNumOperands()) {
  case 1:
    return true;
  case 2:
    if (ConstantInt *IntMD =
            mdconst::extract_or_null<ConstantInt>(MD->getOperand(1).get()))
      return IntMD->getZExtValue();
    return true;
  }
  llvm_unreachable("unexpected number of options");
}

bool polly::getBooleanLoopAttribute(MDNode *LoopID, StringRef Name) {
  return getOptionalBoolLoopAttribute(LoopID, Name).value_or(false);
}

llvm::Optional<int> polly::getOptionalIntLoopAttribute(MDNode *LoopID,
                                                       StringRef Name) {
  const MDOperand *AttrMD =
      findNamedMetadataArg(LoopID, Name).value_or(nullptr);
  if (!AttrMD)
    return None;

  ConstantInt *IntMD = mdconst::extract_or_null<ConstantInt>(AttrMD->get());
  if (!IntMD)
    return None;

  return IntMD->getSExtValue();
}

bool polly::hasDisableAllTransformsHint(Loop *L) {
  return llvm::hasDisableAllTransformsHint(L);
}

bool polly::hasDisableAllTransformsHint(llvm::MDNode *LoopID) {
  return getBooleanLoopAttribute(LoopID, "llvm.loop.disable_nonforced");
}

isl::id polly::getIslLoopAttr(isl::ctx Ctx, BandAttr *Attr) {
  assert(Attr && "Must be a valid BandAttr");

  // The name "Loop" signals that this id contains a pointer to a BandAttr.
  // The ScheduleOptimizer also uses the string "Inter iteration alias-free" in
  // markers, but its user pointer is an llvm::Value.
  isl::id Result = isl::id::alloc(Ctx, "Loop with Metadata", Attr);
  Result = isl::manage(isl_id_set_free_user(Result.release(), [](void *Ptr) {
    BandAttr *Attr = reinterpret_cast<BandAttr *>(Ptr);
    delete Attr;
  }));
  return Result;
}

isl::id polly::createIslLoopAttr(isl::ctx Ctx, Loop *L) {
  if (!L)
    return {};

  // A loop without metadata does not need to be annotated.
  MDNode *LoopID = L->getLoopID();
  if (!LoopID)
    return {};

  BandAttr *Attr = new BandAttr();
  Attr->OriginalLoop = L;
  Attr->Metadata = L->getLoopID();

  return getIslLoopAttr(Ctx, Attr);
}

bool polly::isLoopAttr(const isl::id &Id) {
  if (Id.is_null())
    return false;

  return Id.get_name() == "Loop with Metadata";
}

BandAttr *polly::getLoopAttr(const isl::id &Id) {
  if (!isLoopAttr(Id))
    return nullptr;

  return reinterpret_cast<BandAttr *>(Id.get_user());
}