1 //===- ScopBuilder.cpp ---------------------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Create a polyhedral description for a static control flow region.
11 //
12 // The pass creates a polyhedral description of the Scops detected by the SCoP
13 // detection derived from their LLVM-IR code.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "polly/ScopBuilder.h"
18 #include "polly/Options.h"
19 #include "polly/Support/GICHelper.h"
20 #include "polly/Support/SCEVValidator.h"
21 #include "llvm/Analysis/RegionIterator.h"
22 #include "llvm/IR/DiagnosticInfo.h"
23 
24 using namespace llvm;
25 using namespace polly;
26 
27 #define DEBUG_TYPE "polly-scops"
28 
// Statistics reported via -stats; counters are bumped in the ScopBuilder
// constructor once construction of a scop finishes.
STATISTIC(ScopFound, "Number of valid Scops");
STATISTIC(RichScopFound, "Number of Scops containing a loop");
STATISTIC(InfeasibleScops,
          "Number of SCoPs with statically infeasible context.");
33 
34 // If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
35 // for Polly. If the loop is affine, return the loop itself. Do not call
36 // `getSCEVAtScope()` on the result of `getFirstNonBoxedLoopFor()`, as we need
37 // to analyze the memory accesses of the nonaffine/boxed loops.
38 static Loop *getFirstNonBoxedLoopFor(Loop *L, LoopInfo &LI,
39                                      const BoxedLoopsSetTy &BoxedLoops) {
40   while (BoxedLoops.count(L))
41     L = L->getParentLoop();
42   return L;
43 }
44 
// Command-line switch: when set (default), read-only scalars used inside the
// scop get explicit READ accesses; ensureValueRead() consults this flag.
static cl::opt<bool> ModelReadOnlyScalars(
    "polly-analyze-read-only-scalars",
    cl::desc("Model read-only scalar values in the scop description"),
    cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::cat(PollyCategory));
49 
/// Create scalar accesses modeling @p PHI as if it had been demoted to
/// memory: each incoming edge writes the incoming value in its predecessor
/// block (ensurePHIWrite) and the PHI itself reads it (addPHIReadAccess).
///
/// @param PHI                The PHI node to model.
/// @param NonAffineSubRegion The non-affine subregion containing the PHI, or
///                           nullptr if the PHI is in an affine part.
/// @param IsExitBlock        True iff the PHI is in the region's exit block;
///                           then only the operands, not the PHI itself, are
///                           modeled.
void ScopBuilder::buildPHIAccesses(PHINode *PHI, Region *NonAffineSubRegion,
                                   bool IsExitBlock) {

  // PHI nodes that are in the exit block of the region, hence if IsExitBlock is
  // true, are not modeled as ordinary PHI nodes as they are not part of the
  // region. However, we model the operands in the predecessor blocks that are
  // part of the region as regular scalar accesses.

  // If we can synthesize a PHI we can skip it, however only if it is in
  // the region. If it is not it can only be in the exit block of the region.
  // In this case we model the operands but not the PHI itself.
  auto *Scope = LI.getLoopFor(PHI->getParent());
  if (!IsExitBlock && canSynthesize(PHI, *scop, &SE, Scope))
    return;

  // PHI nodes are modeled as if they had been demoted prior to the SCoP
  // detection. Hence, the PHI is a load of a new memory location in which the
  // incoming value was written at the end of the incoming basic block.
  bool OnlyNonAffineSubRegionOperands = true;
  for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
    Value *Op = PHI->getIncomingValue(u);
    BasicBlock *OpBB = PHI->getIncomingBlock(u);

    // Do not build PHI dependences inside a non-affine subregion, but make
    // sure that the necessary scalar values are still made available.
    if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB)) {
      auto *OpInst = dyn_cast<Instruction>(Op);
      if (!OpInst || !NonAffineSubRegion->contains(OpInst))
        ensureValueRead(Op, OpBB);
      continue;
    }

    OnlyNonAffineSubRegionOperands = false;
    ensurePHIWrite(PHI, OpBB, Op, IsExitBlock);
  }

  // If every incoming edge originated inside the non-affine subregion no PHI
  // write was created, so a PHI read would have nothing to read.
  if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
    addPHIReadAccess(PHI);
  }
}
90 
91 void ScopBuilder::buildScalarDependences(Instruction *Inst) {
92   assert(!isa<PHINode>(Inst));
93 
94   // Pull-in required operands.
95   for (Use &Op : Inst->operands())
96     ensureValueRead(Op.get(), Inst->getParent());
97 }
98 
99 void ScopBuilder::buildEscapingDependences(Instruction *Inst) {
100   // Check for uses of this instruction outside the scop. Because we do not
101   // iterate over such instructions and therefore did not "ensure" the existence
102   // of a write, we must determine such use here.
103   for (Use &U : Inst->uses()) {
104     Instruction *UI = dyn_cast<Instruction>(U.getUser());
105     if (!UI)
106       continue;
107 
108     BasicBlock *UseParent = getUseBlock(U);
109     BasicBlock *UserParent = UI->getParent();
110 
111     // An escaping value is either used by an instruction not within the scop,
112     // or (when the scop region's exit needs to be simplified) by a PHI in the
113     // scop's exit block. This is because region simplification before code
114     // generation inserts new basic blocks before the PHI such that its incoming
115     // blocks are not in the scop anymore.
116     if (!scop->contains(UseParent) ||
117         (isa<PHINode>(UI) && scop->isExit(UserParent) &&
118          scop->hasSingleExitEdge())) {
119       // At least one escaping use found.
120       ensureValueWrite(Inst);
121       break;
122     }
123   }
124 }
125 
/// Try to model @p Inst as a multi-dimensional array access with fixed
/// (compile-time constant) dimension sizes, derived from the GEP index
/// structure of the address. Returns true iff such an access was created;
/// false means a less specialized builder should handle the instruction.
bool ScopBuilder::buildAccessMultiDimFixed(MemAccInst Inst, Loop *L) {
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  Value *Address = Inst.getPointerOperand();
  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  // Look through a bitcast of the address if it does not change the allocated
  // size of the pointee, so the GEP behind it can be analyzed.
  if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
    auto *Src = BitCast->getOperand(0);
    auto *SrcTy = Src->getType();
    auto *DstTy = BitCast->getType();
    // Do not try to delinearize non-sized (opaque) pointers.
    if ((SrcTy->isPointerTy() && !SrcTy->getPointerElementType()->isSized()) ||
        (DstTy->isPointerTy() && !DstTy->getPointerElementType()->isSized())) {
      return false;
    }
    if (SrcTy->isPointerTy() && DstTy->isPointerTy() &&
        DL.getTypeAllocSize(SrcTy->getPointerElementType()) ==
            DL.getTypeAllocSize(DstTy->getPointerElementType()))
      Address = Src;
  }

  auto *GEP = dyn_cast<GetElementPtrInst>(Address);
  if (!GEP)
    return false;

  // Derive per-dimension subscripts and constant sizes from the GEP.
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;
  std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);
  auto *BasePtr = GEP->getOperand(0);

  if (auto *BasePtrCast = dyn_cast<BitCastInst>(BasePtr))
    BasePtr = BasePtrCast->getOperand(0);

  // Check for identical base pointers to ensure that we do not miss index
  // offsets that have been added before this GEP is applied.
  if (BasePtr != BasePointer->getValue())
    return false;

  std::vector<const SCEV *> SizesSCEV;

  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  // All subscripts must be affine in the surrounding (non-boxed) loop, and
  // any loads they depend on must be among the required invariant loads.
  Loop *SurroundingLoop = getFirstNonBoxedLoopFor(L, LI, scop->getBoxedLoops());
  for (auto *Subscript : Subscripts) {
    InvariantLoadsSetTy AccessILS;
    if (!isAffineExpr(&scop->getRegion(), SurroundingLoop, Subscript, SE,
                      &AccessILS))
      return false;

    for (LoadInst *LInst : AccessILS)
      if (!ScopRIL.count(LInst))
        return false;
  }

  if (Sizes.empty())
    return false;

  // The outermost dimension has no size constraint; use nullptr as the
  // conventional placeholder.
  SizesSCEV.push_back(nullptr);

  for (auto V : Sizes)
    SizesSCEV.push_back(SE.getSCEV(
        ConstantInt::get(IntegerType::getInt64Ty(BasePtr->getContext()), V)));

  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
                 Subscripts, SizesSCEV, Val);
  return true;
}
197 
198 bool ScopBuilder::buildAccessMultiDimParam(MemAccInst Inst, Loop *L) {
199   if (!PollyDelinearize)
200     return false;
201 
202   Value *Address = Inst.getPointerOperand();
203   Value *Val = Inst.getValueOperand();
204   Type *ElementType = Val->getType();
205   unsigned ElementSize = DL.getTypeAllocSize(ElementType);
206   enum MemoryAccess::AccessType AccType =
207       isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
208 
209   const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
210   const SCEVUnknown *BasePointer =
211       dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
212 
213   assert(BasePointer && "Could not find base pointer");
214 
215   auto &InsnToMemAcc = scop->getInsnToMemAccMap();
216   auto AccItr = InsnToMemAcc.find(Inst);
217   if (AccItr == InsnToMemAcc.end())
218     return false;
219 
220   std::vector<const SCEV *> Sizes = {nullptr};
221 
222   Sizes.insert(Sizes.end(), AccItr->second.Shape->DelinearizedSizes.begin(),
223                AccItr->second.Shape->DelinearizedSizes.end());
224   // Remove the element size. This information is already provided by the
225   // ElementSize parameter. In case the element size of this access and the
226   // element size used for delinearization differs the delinearization is
227   // incorrect. Hence, we invalidate the scop.
228   //
229   // TODO: Handle delinearization with differing element sizes.
230   auto DelinearizedSize =
231       cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
232   Sizes.pop_back();
233   if (ElementSize != DelinearizedSize)
234     scop->invalidate(DELINEARIZATION, Inst->getDebugLoc());
235 
236   addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
237                  AccItr->second.DelinearizedSubscripts, Sizes, Val);
238   return true;
239 }
240 
/// Model a memory intrinsic (memset/memcpy/memmove) as array accesses: a
/// MUST_WRITE of the destination and, for transfers, a READ of the source,
/// both as byte (i8) arrays starting at the respective base pointer.
/// Returns true iff @p Inst is a memory intrinsic (even when the resulting
/// access is over-approximated or skipped for a NULL pointer).
bool ScopBuilder::buildAccessMemIntrinsic(MemAccInst Inst, Loop *L) {
  auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);

  if (MemIntr == nullptr)
    return false;

  auto *LengthVal = SE.getSCEVAtScope(MemIntr->getLength(), L);
  assert(LengthVal);

  // Check if the length val is actually affine or if we overapproximate it
  InvariantLoadsSetTy AccessILS;
  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  Loop *SurroundingLoop = getFirstNonBoxedLoopFor(L, LI, scop->getBoxedLoops());
  bool LengthIsAffine = isAffineExpr(&scop->getRegion(), SurroundingLoop,
                                     LengthVal, SE, &AccessILS);
  for (LoadInst *LInst : AccessILS)
    if (!ScopRIL.count(LInst))
      LengthIsAffine = false;
  // A non-affine length cannot be used as an access bound; drop it so the
  // access is modeled without a length constraint (over-approximation).
  if (!LengthIsAffine)
    LengthVal = nullptr;

  auto *DestPtrVal = MemIntr->getDest();
  assert(DestPtrVal);

  auto *DestAccFunc = SE.getSCEVAtScope(DestPtrVal, L);
  assert(DestAccFunc);
  // Ignore accesses to "NULL".
  // TODO: We could use this to optimize the region further, e.g., intersect
  //       the context with
  //          isl_set_complement(isl_set_params(getDomain()))
  //       as we know it would be undefined to execute this instruction anyway.
  if (DestAccFunc->isZero())
    return true;

  auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(DestAccFunc));
  assert(DestPtrSCEV);
  // Express the destination offset relative to its base pointer.
  DestAccFunc = SE.getMinusSCEV(DestAccFunc, DestPtrSCEV);
  addArrayAccess(Inst, MemoryAccess::MUST_WRITE, DestPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(DestPtrVal->getContext()),
                 LengthIsAffine, {DestAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  // memset has no source operand; only memcpy/memmove continue below.
  auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
  if (!MemTrans)
    return true;

  auto *SrcPtrVal = MemTrans->getSource();
  assert(SrcPtrVal);

  auto *SrcAccFunc = SE.getSCEVAtScope(SrcPtrVal, L);
  assert(SrcAccFunc);
  // Ignore accesses to "NULL".
  // TODO: See above TODO
  if (SrcAccFunc->isZero())
    return true;

  auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(SrcAccFunc));
  assert(SrcPtrSCEV);
  // Express the source offset relative to its base pointer.
  SrcAccFunc = SE.getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
  addArrayAccess(Inst, MemoryAccess::READ, SrcPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(SrcPtrVal->getContext()),
                 LengthIsAffine, {SrcAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  return true;
}
308 
/// Model the memory effects of a call instruction based on its alias-analysis
/// mod/ref behavior. Returns true iff @p Inst is a call that was handled
/// (including calls known not to access memory); returning false defers the
/// call to a more conservative access builder.
bool ScopBuilder::buildAccessCallInst(MemAccInst Inst, Loop *L) {
  auto *CI = dyn_cast_or_null<CallInst>(Inst);

  if (CI == nullptr)
    return false;

  if (CI->doesNotAccessMemory() || isIgnoredIntrinsic(CI))
    return true;

  bool ReadOnly = false;
  // Zero offset: argument-pointee accesses are modeled at the base pointer.
  auto *AF = SE.getConstant(IntegerType::getInt64Ty(CI->getContext()), 0);
  auto *CalledFunction = CI->getCalledFunction();
  switch (AA.getModRefBehavior(CalledFunction)) {
  case FMRB_UnknownModRefBehavior:
    llvm_unreachable("Unknown mod ref behaviour cannot be represented.");
  case FMRB_DoesNotAccessMemory:
    return true;
  case FMRB_DoesNotReadMemory:
  case FMRB_OnlyAccessesInaccessibleMem:
  case FMRB_OnlyAccessesInaccessibleOrArgMem:
    return false;
  case FMRB_OnlyReadsMemory:
    // The call may read any array; defer until all arrays are known (handled
    // at the end of buildScop()).
    GlobalReads.push_back(CI);
    return true;
  case FMRB_OnlyReadsArgumentPointees:
    ReadOnly = true;
  // Fall through
  case FMRB_OnlyAccessesArgumentPointees:
    // Model one access per pointer argument: READ if the function only reads
    // its argument pointees, otherwise a MAY_WRITE.
    auto AccType = ReadOnly ? MemoryAccess::READ : MemoryAccess::MAY_WRITE;
    for (const auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;

      auto *ArgSCEV = SE.getSCEVAtScope(Arg, L);
      if (ArgSCEV->isZero())
        continue;

      auto *ArgBasePtr = cast<SCEVUnknown>(SE.getPointerBase(ArgSCEV));
      addArrayAccess(Inst, AccType, ArgBasePtr->getValue(),
                     ArgBasePtr->getType(), false, {AF}, {nullptr}, CI);
    }
    return true;
  }

  return true;
}
355 
356 void ScopBuilder::buildAccessSingleDim(MemAccInst Inst, Loop *L) {
357   Value *Address = Inst.getPointerOperand();
358   Value *Val = Inst.getValueOperand();
359   Type *ElementType = Val->getType();
360   enum MemoryAccess::AccessType AccType =
361       isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
362 
363   const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
364   const SCEVUnknown *BasePointer =
365       dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
366 
367   assert(BasePointer && "Could not find base pointer");
368   AccessFunction = SE.getMinusSCEV(AccessFunction, BasePointer);
369 
370   // Check if the access depends on a loop contained in a non-affine subregion.
371   bool isVariantInNonAffineLoop = false;
372   SetVector<const Loop *> Loops;
373   auto &BoxedLoops = scop->getBoxedLoops();
374   findLoops(AccessFunction, Loops);
375   for (const Loop *L : Loops)
376     if (BoxedLoops.count(L))
377       isVariantInNonAffineLoop = true;
378 
379   InvariantLoadsSetTy AccessILS;
380 
381   Loop *SurroundingLoop = getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
382   bool IsAffine = !isVariantInNonAffineLoop &&
383                   isAffineExpr(&scop->getRegion(), SurroundingLoop,
384                                AccessFunction, SE, &AccessILS);
385 
386   const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
387   for (LoadInst *LInst : AccessILS)
388     if (!ScopRIL.count(LInst))
389       IsAffine = false;
390 
391   if (!IsAffine && AccType == MemoryAccess::MUST_WRITE)
392     AccType = MemoryAccess::MAY_WRITE;
393 
394   addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, IsAffine,
395                  {AccessFunction}, {nullptr}, Val);
396 }
397 
398 void ScopBuilder::buildMemoryAccess(MemAccInst Inst, Loop *L) {
399 
400   if (buildAccessMemIntrinsic(Inst, L))
401     return;
402 
403   if (buildAccessCallInst(Inst, L))
404     return;
405 
406   if (buildAccessMultiDimFixed(Inst, L))
407     return;
408 
409   if (buildAccessMultiDimParam(Inst, L))
410     return;
411 
412   buildAccessSingleDim(Inst, L);
413 }
414 
415 void ScopBuilder::buildAccessFunctions(Region &SR) {
416 
417   if (scop->isNonAffineSubRegion(&SR)) {
418     for (BasicBlock *BB : SR.blocks())
419       buildAccessFunctions(*BB, &SR);
420     return;
421   }
422 
423   for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
424     if (I->isSubRegion())
425       buildAccessFunctions(*I->getNodeAs<Region>());
426     else
427       buildAccessFunctions(*I->getNodeAs<BasicBlock>());
428 }
429 
430 void ScopBuilder::buildStmts(Region &SR) {
431 
432   if (scop->isNonAffineSubRegion(&SR)) {
433     scop->addScopStmt(&SR);
434     return;
435   }
436 
437   for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
438     if (I->isSubRegion())
439       buildStmts(*I->getNodeAs<Region>());
440     else
441       scop->addScopStmt(I->getNodeAs<BasicBlock>());
442 }
443 
/// Build all memory and scalar accesses for basic block @p BB.
///
/// @param BB                 The block to process.
/// @param NonAffineSubRegion The innermost non-affine subregion containing
///                           @p BB, or nullptr.
/// @param IsExitBlock        True iff @p BB is the region's exit block; then
///                           only the leading PHI nodes are modeled.
void ScopBuilder::buildAccessFunctions(BasicBlock &BB,
                                       Region *NonAffineSubRegion,
                                       bool IsExitBlock) {
  // We do not build access functions for error blocks, as they may contain
  // instructions we can not model.
  if (isErrorBlock(BB, scop->getRegion(), LI, DT) && !IsExitBlock)
    return;

  Loop *L = LI.getLoopFor(&BB);

  for (Instruction &Inst : BB) {
    PHINode *PHI = dyn_cast<PHINode>(&Inst);
    if (PHI)
      buildPHIAccesses(PHI, NonAffineSubRegion, IsExitBlock);

    // For the exit block we stop modeling after the last PHI node.
    if (!PHI && IsExitBlock)
      break;

    if (auto MemInst = MemAccInst::dyn_cast(Inst))
      buildMemoryAccess(MemInst, L);

    // Ignored intrinsics (e.g. debug intrinsics) create no scalar accesses.
    if (isIgnoredIntrinsic(&Inst))
      continue;

    // PHI nodes have already been modeled above and TerminatorInsts that are
    // not part of a non-affine subregion are fully modeled and regenerated
    // from the polyhedral domains. Hence, they do not need to be modeled as
    // explicit data dependences.
    if (!PHI && (!isa<TerminatorInst>(&Inst) || NonAffineSubRegion))
      buildScalarDependences(&Inst);

    // Values used after the scop must be written back to memory.
    if (!IsExitBlock)
      buildEscapingDependences(&Inst);
  }
}
480 
/// Create a MemoryAccess of kind @p Kind for the statement containing @p BB
/// and register it with the scop and the statement.
///
/// A MUST_WRITE is downgraded to MAY_WRITE unless the access is known to
/// execute whenever its statement executes. Returns nullptr if @p BB is not
/// part of any statement (accesses outside the SCoP would be ignored anyway).
MemoryAccess *ScopBuilder::addMemoryAccess(
    BasicBlock *BB, Instruction *Inst, MemoryAccess::AccessType AccType,
    Value *BaseAddress, Type *ElementType, bool Affine, Value *AccessValue,
    ArrayRef<const SCEV *> Subscripts, ArrayRef<const SCEV *> Sizes,
    ScopArrayInfo::MemoryKind Kind) {
  ScopStmt *Stmt = scop->getStmtFor(BB);

  // Do not create a memory access for anything not in the SCoP. It would be
  // ignored anyway.
  if (!Stmt)
    return nullptr;

  Value *BaseAddr = BaseAddress;
  std::string BaseName = getIslCompatibleName("MemRef_", BaseAddr, "");

  bool isKnownMustAccess = false;

  // Accesses in single-basic block statements are always executed.
  if (Stmt->isBlockStmt())
    isKnownMustAccess = true;

  if (Stmt->isRegionStmt()) {
    // Accesses that dominate the exit block of a non-affine region are always
    // executed. In non-affine regions there may exist MK_Values that do not
    // dominate the exit. MK_Values will always dominate the exit and MK_PHIs
    // only if there is at most one PHI_WRITE in the non-affine region.
    if (DT.dominates(BB, Stmt->getRegion()->getExit()))
      isKnownMustAccess = true;
  }

  // Non-affine PHI writes do not "happen" at a particular instruction, but
  // after exiting the statement. Therefore they are guaranteed to execute and
  // overwrite the old value.
  if (Kind == ScopArrayInfo::MK_PHI || Kind == ScopArrayInfo::MK_ExitPHI)
    isKnownMustAccess = true;

  if (!isKnownMustAccess && AccType == MemoryAccess::MUST_WRITE)
    AccType = MemoryAccess::MAY_WRITE;

  auto *Access =
      new MemoryAccess(Stmt, Inst, AccType, BaseAddress, ElementType, Affine,
                       Subscripts, Sizes, AccessValue, Kind, BaseName);

  scop->addAccessFunction(Access);
  Stmt->addAccess(Access);
  return Access;
}
528 
/// Create an MK_Array access for @p MemAccInst and remember its base pointer
/// in ArrayBasePointers (used later to materialize global-read accesses for
/// read-only call instructions).
void ScopBuilder::addArrayAccess(
    MemAccInst MemAccInst, MemoryAccess::AccessType AccType, Value *BaseAddress,
    Type *ElementType, bool IsAffine, ArrayRef<const SCEV *> Subscripts,
    ArrayRef<const SCEV *> Sizes, Value *AccessValue) {
  ArrayBasePointers.insert(BaseAddress);
  addMemoryAccess(MemAccInst->getParent(), MemAccInst, AccType, BaseAddress,
                  ElementType, IsAffine, AccessValue, Subscripts, Sizes,
                  ScopArrayInfo::MK_Array);
}
538 
539 void ScopBuilder::ensureValueWrite(Instruction *Inst) {
540   ScopStmt *Stmt = scop->getStmtFor(Inst);
541 
542   // Inst not defined within this SCoP.
543   if (!Stmt)
544     return;
545 
546   // Do not process further if the instruction is already written.
547   if (Stmt->lookupValueWriteOf(Inst))
548     return;
549 
550   addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::MUST_WRITE, Inst,
551                   Inst->getType(), true, Inst, ArrayRef<const SCEV *>(),
552                   ArrayRef<const SCEV *>(), ScopArrayInfo::MK_Value);
553 }
554 
/// Ensure an MK_Value read of @p V exists in the statement containing
/// @p UserBB, and that the defining statement (if any) has a matching value
/// write. The checks below are ordered filters; each early return means no
/// scalar access is needed.
void ScopBuilder::ensureValueRead(Value *V, BasicBlock *UserBB) {

  // There cannot be an "access" for literal constants. BasicBlock references
  // (jump destinations) also never change.
  if ((isa<Constant>(V) && !isa<GlobalVariable>(V)) || isa<BasicBlock>(V))
    return;

  // If the instruction can be synthesized and the user is in the region we do
  // not need to add a value dependences.
  auto *Scope = LI.getLoopFor(UserBB);
  if (canSynthesize(V, *scop, &SE, Scope))
    return;

  // Do not build scalar dependences for required invariant loads as we will
  // hoist them later on anyway or drop the SCoP if we cannot.
  auto &ScopRIL = scop->getRequiredInvariantLoads();
  if (ScopRIL.count(dyn_cast<LoadInst>(V)))
    return;

  // Determine the ScopStmt containing the value's definition and use. There is
  // no defining ScopStmt if the value is a function argument, a global value,
  // or defined outside the SCoP.
  Instruction *ValueInst = dyn_cast<Instruction>(V);
  ScopStmt *ValueStmt = ValueInst ? scop->getStmtFor(ValueInst) : nullptr;

  ScopStmt *UserStmt = scop->getStmtFor(UserBB);

  // We do not model uses outside the scop.
  if (!UserStmt)
    return;

  // Add MemoryAccess for invariant values only if requested.
  if (!ModelReadOnlyScalars && !ValueStmt)
    return;

  // Ignore use-def chains within the same ScopStmt.
  if (ValueStmt == UserStmt)
    return;

  // Do not create another MemoryAccess for reloading the value if one already
  // exists.
  if (UserStmt->lookupValueReadOf(V))
    return;

  addMemoryAccess(UserBB, nullptr, MemoryAccess::READ, V, V->getType(), true, V,
                  ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
                  ScopArrayInfo::MK_Value);
  // The read needs a corresponding write in the defining statement (if the
  // value is defined inside the scop at all).
  if (ValueInst)
    ensureValueWrite(ValueInst);
}
605 
/// Ensure a PHI write access for @p PHI exists in the statement containing
/// @p IncomingBlock, recording (@p IncomingBlock, @p IncomingValue) as an
/// incoming pair. @p IsExitBlock selects MK_ExitPHI instead of MK_PHI for
/// PHIs in the region's exit block.
void ScopBuilder::ensurePHIWrite(PHINode *PHI, BasicBlock *IncomingBlock,
                                 Value *IncomingValue, bool IsExitBlock) {
  // As the incoming block might turn out to be an error statement ensure we
  // will create an exit PHI SAI object. It is needed during code generation
  // and would be created later anyway.
  if (IsExitBlock)
    scop->getOrCreateScopArrayInfo(PHI, PHI->getType(), {},
                                   ScopArrayInfo::MK_ExitPHI);

  // Incoming blocks outside the scop (e.g. error blocks) get no write.
  ScopStmt *IncomingStmt = scop->getStmtFor(IncomingBlock);
  if (!IncomingStmt)
    return;

  // Take care for the incoming value being available in the incoming block.
  // This must be done before the check for multiple PHI writes because multiple
  // exiting edges from subregion each can be the effective written value of the
  // subregion. As such, all of them must be made available in the subregion
  // statement.
  ensureValueRead(IncomingValue, IncomingBlock);

  // Do not add more than one MemoryAccess per PHINode and ScopStmt.
  if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
    assert(Acc->getAccessInstruction() == PHI);
    Acc->addIncoming(IncomingBlock, IncomingValue);
    return;
  }

  MemoryAccess *Acc = addMemoryAccess(
      IncomingStmt->getEntryBlock(), PHI, MemoryAccess::MUST_WRITE, PHI,
      PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
      ArrayRef<const SCEV *>(),
      IsExitBlock ? ScopArrayInfo::MK_ExitPHI : ScopArrayInfo::MK_PHI);
  assert(Acc);
  Acc->addIncoming(IncomingBlock, IncomingValue);
}
641 
/// Create the MK_PHI read access for @p PHI in its own block, matching the
/// PHI writes created by ensurePHIWrite() in the incoming blocks.
void ScopBuilder::addPHIReadAccess(PHINode *PHI) {
  addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI,
                  PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
                  ArrayRef<const SCEV *>(), ScopArrayInfo::MK_PHI);
}
647 
/// Construct the polyhedral description of region @p R: create the
/// statements, then all access functions, then initialize the scop.
void ScopBuilder::buildScop(Region &R) {
  scop.reset(new Scop(R, SE, LI, *SD.getDetectionContext(&R)));

  buildStmts(R);
  buildAccessFunctions(R);

  // In case the region does not have an exiting block we will later (during
  // code generation) split the exit block. This will move potential PHI nodes
  // from the current exit block into the new region exiting block. Hence, PHI
  // nodes that are at this point not part of the region will be.
  // To handle these PHI nodes later we will now model their operands as scalar
  // accesses. Note that we do not model anything in the exit block if we have
  // an exiting block in the region, as there will not be any splitting later.
  if (!scop->hasSingleExitEdge())
    buildAccessFunctions(*R.getExit(), nullptr,
                         /* IsExitBlock */ true);

  // Create memory accesses for global reads since all arrays are now known.
  // Each deferred read-only call (see buildAccessCallInst) is conservatively
  // modeled as reading every known array.
  auto *AF = SE.getConstant(IntegerType::getInt64Ty(SE.getContext()), 0);
  for (auto *GlobalRead : GlobalReads)
    for (auto *BP : ArrayBasePointers)
      addArrayAccess(MemAccInst(GlobalRead), MemoryAccess::READ, BP,
                     BP->getType(), false, {AF}, {nullptr}, GlobalRead);

  scop->init(AA, DT, LI);
}
674 
/// Build the Scop for region @p R, emitting optimization remarks that mark
/// the SCoP's begin and end. If the constructed scop has no feasible runtime
/// context it is discarded (the `scop` member is reset to null).
ScopBuilder::ScopBuilder(Region *R, AliasAnalysis &AA, const DataLayout &DL,
                         DominatorTree &DT, LoopInfo &LI, ScopDetection &SD,
                         ScalarEvolution &SE)
    : AA(AA), DL(DL), DT(DT), LI(LI), SD(SD), SE(SE) {

  Function *F = R->getEntry()->getParent();

  DebugLoc Beg, End;
  getDebugLocations(getBBPairForRegion(R), Beg, End);
  std::string Msg = "SCoP begins here.";
  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, Beg, Msg);

  buildScop(*R);

  DEBUG(scop->print(dbgs()));

  if (!scop->hasFeasibleRuntimeContext()) {
    // A statically infeasible context means the scop can never execute;
    // discard it and only report the dismissal.
    InfeasibleScops++;
    Msg = "SCoP ends here but was dismissed.";
    scop.reset();
  } else {
    Msg = "SCoP ends here.";
    ++ScopFound;
    if (scop->getMaxLoopDepth() > 0)
      ++RichScopFound;
  }

  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, End, Msg);
}
704