//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslExprBuilder.h"
#include "polly/CodeGen/RuntimeDebugBuilder.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "isl/aff.h"
#include "isl/ast.h"
#include "isl/ast_build.h"
#include "isl/set.h"
#include <deque>

using namespace llvm;
using namespace polly;

static cl::opt<bool> Aligned("enable-polly-aligned",
                             cl::desc("Assume aligned memory accesses."),
                             cl::Hidden, cl::init(false), cl::ZeroOrMore,
                             cl::cat(PollyCategory));

static cl::opt<bool> DebugPrinting(
    "polly-codegen-add-debug-printing",
    cl::desc("Add printf calls that show the values loaded/stored."),
    cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

BlockGenerator::BlockGenerator(PollyIRBuilder &B, LoopInfo &LI,
                               ScalarEvolution &SE, DominatorTree &DT,
                               ScalarAllocaMapTy &ScalarMap,
                               ScalarAllocaMapTy &PHIOpMap,
                               EscapeUsersAllocaMapTy &EscapeMap,
                               ValueMapT &GlobalMap,
                               IslExprBuilder *ExprBuilder)
    : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
      EntryBB(nullptr), PHIOpMap(PHIOpMap), ScalarMap(ScalarMap),
      EscapeMap(EscapeMap), GlobalMap(GlobalMap) {}

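// Try to re-synthesize 'Old' at the current insert point instead of copying
// it: if ScalarEvolution can express 'Old' at the scope of loop 'L', the
// SCEV is rewritten with the new induction variables provided by 'LTS' and
// expanded to fresh IR. Returns nullptr if 'Old' is not SCEV-expressible.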
Value *BlockGenerator::trySynthesizeNewValue(ScopStmt &Stmt, Value *Old,
                                             ValueMapT &BBMap,
                                             LoopToScevMapT &LTS,
                                             Loop *L) const {
  if (SE.isSCEVable(Old->getType()))
    if (const SCEV *Scev = SE.getSCEVAtScope(const_cast<Value *>(Old), L)) {
      if (!isa<SCEVCouldNotCompute>(Scev)) {
        const SCEV *NewScev = apply(Scev, LTS, SE);
        ValueMapT VTV;
        VTV.insert(BBMap.begin(), BBMap.end());
        VTV.insert(GlobalMap.begin(), GlobalMap.end());

        Scop &S = *Stmt.getParent();
        const DataLayout &DL =
            S.getRegion().getEntry()->getParent()->getParent()->getDataLayout();
        auto IP = Builder.GetInsertPoint();

        assert(IP != Builder.GetInsertBlock()->end() &&
               "Only instructions can be insert points for SCEVExpander");
        Value *Expanded = expandCodeFor(S, SE, DL, "polly", NewScev,
                                        Old->getType(), IP, &VTV);

        BBMap[Old] = Expanded;
        return Expanded;
      }
    }

  return nullptr;
}

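// Look up the new counterpart of 'Old'. The lookup order is: constants
// (returned unchanged), the GlobalMap, the block-local BBMap, SCEV-based
// re-synthesis, and finally scop-invariant values (globals, arguments, and
// instructions defined outside the scop), which can be reused as they are.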
Value *BlockGenerator::getNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap,
                                   LoopToScevMapT &LTS, Loop *L) const {
  // We assume constants never change.
  // This avoids map lookups for many calls to this function.
  if (isa<Constant>(Old))
    return const_cast<Value *>(Old);

  if (Value *New = GlobalMap.lookup(Old)) {
    if (Value *NewRemapped = GlobalMap.lookup(New))
      New = NewRemapped;
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  }

  if (Value *New = BBMap.lookup(Old))
    return New;

  if (Value *New = trySynthesizeNewValue(Stmt, Old, BBMap, LTS, L))
    return New;

  // A scop-constant value defined by a global or a function parameter.
  if (isa<GlobalValue>(Old) || isa<Argument>(Old))
    return const_cast<Value *>(Old);

  // A scop-constant value defined by an instruction executed outside the scop.
  if (const Instruction *Inst = dyn_cast<Instruction>(Old))
    if (!Stmt.getParent()->getRegion().contains(Inst->getParent()))
      return const_cast<Value *>(Old);

  // The scalar dependence is neither available nor SCEVCodegenable.
  llvm_unreachable("Unexpected scalar dependence in region!");
  return nullptr;
}

void BlockGenerator::copyInstScalar(ScopStmt &Stmt, Instruction *Inst,
                                    ValueMapT &BBMap, LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. In the current state they just crash the code
  // generation as the metadata operands are not correctly copied.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();

  // Replace old operands with the new ones.
  for (Value *OldOperand : Inst->operands()) {
    Value *NewOperand =
        getNewValue(Stmt, OldOperand, BBMap, LTS, getLoopForInst(Inst));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      delete NewInst;
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

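// Compute the new address of a memory access. If an isl AST expression was
// attached to the access during scheduling, the address is built from that
// expression (casting it to the expected pointer type if address spaces
// differ); otherwise the original pointer operand is simply remapped.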
Value *BlockGenerator::generateLocationAccessed(
    ScopStmt &Stmt, const Instruction *Inst, Value *Pointer, ValueMapT &BBMap,
    LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses) {
  const MemoryAccess &MA = Stmt.getAccessFor(Inst);

  isl_ast_expr *AccessExpr = isl_id_to_ast_expr_get(NewAccesses, MA.getId());

  if (AccessExpr) {
    AccessExpr = isl_ast_expr_address_of(AccessExpr);
    auto Address = ExprBuilder->create(AccessExpr);

    // Cast the address of this memory access to a pointer type that has the
    // same element type as the original access, but uses the address space of
    // the newly generated pointer.
    auto OldPtrTy = MA.getAccessValue()->getType()->getPointerTo();
    auto NewPtrTy = Address->getType();
    OldPtrTy = PointerType::get(OldPtrTy->getElementType(),
                                NewPtrTy->getPointerAddressSpace());

    if (OldPtrTy != NewPtrTy) {
      assert(OldPtrTy->getPointerElementType()->getPrimitiveSizeInBits() ==
                 NewPtrTy->getPointerElementType()->getPrimitiveSizeInBits() &&
             "Pointer types to elements with different size found");
      Address = Builder.CreateBitOrPointerCast(Address, OldPtrTy);
    }
    return Address;
  }

  return getNewValue(Stmt, Pointer, BBMap, LTS, getLoopForInst(Inst));
}

Loop *BlockGenerator::getLoopForInst(const llvm::Instruction *Inst) {
  return LI.getLoopFor(Inst->getParent());
}

Value *BlockGenerator::generateScalarLoad(ScopStmt &Stmt, LoadInst *Load,
                                          ValueMapT &BBMap, LoopToScevMapT &LTS,
                                          isl_id_to_ast_expr *NewAccesses) {
  if (Value *PreloadLoad = GlobalMap.lookup(Load))
    return PreloadLoad;

  auto *Pointer = Load->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, Pointer, BBMap, LTS, NewAccesses);
  Value *ScalarLoad = Builder.CreateAlignedLoad(
      NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_");

  if (DebugPrinting)
    RuntimeDebugBuilder::createCPUPrinter(Builder, "Load from ", NewPointer,
                                          ": ", ScalarLoad, "\n");

  return ScalarLoad;
}

void BlockGenerator::generateScalarStore(ScopStmt &Stmt, StoreInst *Store,
                                         ValueMapT &BBMap, LoopToScevMapT &LTS,
                                         isl_id_to_ast_expr *NewAccesses) {
  auto *Pointer = Store->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Store, Pointer, BBMap, LTS, NewAccesses);
  Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap, LTS,
                                    getLoopForInst(Store));

  if (DebugPrinting)
    RuntimeDebugBuilder::createCPUPrinter(Builder, "Store to  ", NewPointer,
                                          ": ", ValueOperand, "\n");

  Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlignment());
}

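// Copy a single instruction into the generated statement: scalar reads are
// materialized first, terminators and synthesizable values are skipped, and
// loads, stores, and PHIs are dispatched to their specialized generators.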
void BlockGenerator::copyInstruction(ScopStmt &Stmt, Instruction *Inst,
                                     ValueMapT &BBMap, LoopToScevMapT &LTS,
                                     isl_id_to_ast_expr *NewAccesses) {

  // First check for possible scalar dependences for this instruction.
  generateScalarLoads(Stmt, Inst, BBMap);

  // Terminator instructions define the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  Loop *L = getLoopForInst(Inst);
  if ((Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
      canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion())) {
    // Synthesizable instructions will be generated on-demand.
    return;
  }

  if (auto *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateScalarLoad(Stmt, Load, BBMap, LTS, NewAccesses);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (auto *Store = dyn_cast<StoreInst>(Inst)) {
    generateScalarStore(Stmt, Store, BBMap, LTS, NewAccesses);
    return;
  }

  if (auto *PHI = dyn_cast<PHINode>(Inst)) {
    copyPHIInstruction(Stmt, PHI, BBMap, LTS);
    return;
  }

  // Skip some special intrinsics for which we do not adjust the semantics to
  // the new schedule. All others are handled like every other instruction.
  if (isIgnoredIntrinsic(Inst))
    return;

  copyInstScalar(Stmt, Inst, BBMap, LTS);
}

void BlockGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
                              isl_id_to_ast_expr *NewAccesses) {
  assert(Stmt.isBlockStmt() &&
         "Only block statements can be copied by the block generator");

  ValueMapT BBMap;

  BasicBlock *BB = Stmt.getBasicBlock();
  copyBB(Stmt, BB, BBMap, LTS, NewAccesses);
}

BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  return CopyBB;
}

BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
                                   ValueMapT &BBMap, LoopToScevMapT &LTS,
                                   isl_id_to_ast_expr *NewAccesses) {
  BasicBlock *CopyBB = splitBB(BB);
  copyBB(Stmt, BB, CopyBB, BBMap, LTS, NewAccesses);
  return CopyBB;
}

void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
                            ValueMapT &BBMap, LoopToScevMapT &LTS,
                            isl_id_to_ast_expr *NewAccesses) {
  Builder.SetInsertPoint(CopyBB->begin());
  EntryBB = &CopyBB->getParent()->getEntryBlock();

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, BBMap, LTS, NewAccesses);

  // After a basic block has been copied, store all scalars that escape this
  // block in their allocas: first the scalars that have dependences inside
  // the SCoP, then the ones that might escape the SCoP.
  generateScalarStores(Stmt, BB, LTS, BBMap);

  const Region &R = Stmt.getParent()->getRegion();
  for (Instruction &Inst : *BB)
    handleOutsideUsers(R, &Inst, BBMap[&Inst]);
}

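// Demoted scalars live in allocas created in the function's entry block.
// Two maps are maintained: ScalarMap holds the ".s2a" slots for ordinary
// scalar dependences, PHIOpMap holds the ".phiops" slots for PHI operands.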
Value *BlockGenerator::getOrCreateAlloca(Value *ScalarBase,
                                         ScalarAllocaMapTy &Map,
                                         const char *NameExt) {
  // If no alloca was found, create one and insert it in the entry block.
  if (!Map.count(ScalarBase)) {
    auto *Ty = ScalarBase->getType();
    auto NewAddr = new AllocaInst(Ty, ScalarBase->getName() + NameExt);
    EntryBB = &Builder.GetInsertBlock()->getParent()->getEntryBlock();
    NewAddr->insertBefore(EntryBB->getFirstInsertionPt());
    Map[ScalarBase] = NewAddr;
  }

  auto Addr = Map[ScalarBase];

  if (GlobalMap.count(Addr))
    return GlobalMap[Addr];

  return Addr;
}

Value *BlockGenerator::getOrCreateAlloca(MemoryAccess &Access) {
  if (Access.getScopArrayInfo()->isPHI())
    return getOrCreatePHIAlloca(Access.getBaseAddr());
  else
    return getOrCreateScalarAlloca(Access.getBaseAddr());
}

Value *BlockGenerator::getOrCreateScalarAlloca(Value *ScalarBase) {
  return getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
}

Value *BlockGenerator::getOrCreatePHIAlloca(Value *ScalarBase) {
  return getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
}

void BlockGenerator::handleOutsideUsers(const Region &R, Instruction *Inst,
                                        Value *InstCopy, Value *Address) {
  // If the instruction was copied multiple times we already handled its
  // escape users and can exit. Otherwise, collect all escape users and, if
  // there are any, get the alloca for this instruction and put it in the
  // EscapeMap for later finalization.
  if (EscapeMap.count(Inst))
    return;

  EscapeUserVectorTy EscapeUsers;
  for (User *U : Inst->users()) {

    // Non-instruction users will never escape.
    Instruction *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;

    if (R.contains(UI))
      continue;

    EscapeUsers.push_back(UI);
  }

  // Exit if no escape uses were found.
  if (EscapeUsers.empty())
    return;

  // Get or create an escape alloca for this instruction.
  auto *ScalarAddr = Address ? Address : getOrCreateScalarAlloca(Inst);

  // Remember that this instruction has escape uses and the escape alloca.
  EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));
}

void BlockGenerator::generateScalarLoads(ScopStmt &Stmt,
                                         const Instruction *Inst,
                                         ValueMapT &BBMap) {
  auto *MAL = Stmt.lookupAccessesFor(Inst);

  if (!MAL)
    return;

  for (MemoryAccess *MA : *MAL) {
    if (MA->isExplicit() || !MA->isRead())
      continue;

    auto *Address = getOrCreateAlloca(*MA);
    BBMap[MA->getBaseAddr()] =
        Builder.CreateLoad(Address, Address->getName() + ".reload");
  }
}

Value *BlockGenerator::getNewScalarValue(Value *ScalarValue, const Region &R,
                                         ScopStmt &Stmt, LoopToScevMapT &LTS,
                                         ValueMapT &BBMap) {
  // If the value we want to store is an instruction we might have demoted it
  // in order to make it accessible here. In such a case a reload is
  // necessary. If it is not an instruction it will always be a value that
  // dominates the current point and we can just use it. In total there are
  // four options:
  //  (1) The value is not an instruction ==> use the value.
  //  (2) The value is an instruction that was split out of the region prior
  //      to code generation ==> use the instruction (or its copy in the
  //      GlobalMap) as it dominates the region.
  //  (3) The value is an instruction inside the region:
  //      (a) The value was defined in the current block, thus a copy is in
  //          the BBMap ==> use the mapped value.
  //      (b) The value was defined in a previous block, thus we demoted it
  //          earlier ==> use the reloaded value.
  Instruction *ScalarValueInst = dyn_cast<Instruction>(ScalarValue);
  if (!ScalarValueInst)
    return /* Case (1) */ ScalarValue;

  if (!R.contains(ScalarValueInst)) {
    if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst))
      return /* Case (2) */ ScalarValueCopy;
    else
      return /* Case (2) */ ScalarValue;
  }

  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
    return /* Case (3a) */ ScalarValueCopy;

  if ((Stmt.isBlockStmt() &&
       Stmt.getBasicBlock() == ScalarValueInst->getParent()) ||
      (Stmt.isRegionStmt() && Stmt.getRegion()->contains(ScalarValueInst))) {
    auto SynthesizedValue = trySynthesizeNewValue(
        Stmt, ScalarValueInst, BBMap, LTS, getLoopForInst(ScalarValueInst));

    if (SynthesizedValue)
      return SynthesizedValue;
  }

  // Case (3b)
  Value *Address = getOrCreateScalarAlloca(ScalarValueInst);
  ScalarValue = Builder.CreateLoad(Address, Address->getName() + ".reload");

  return ScalarValue;
}

void BlockGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                          LoopToScevMapT &LTS,
                                          ValueMapT &BBMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.isBlockStmt() && BB == Stmt.getBasicBlock() &&
         "Region statements need to use the generateScalarStores() "
         "function in the RegionGenerator");

  for (MemoryAccess *MA : Stmt) {
    if (MA->isExplicit() || MA->isRead())
      continue;

    Value *Val = MA->getAccessValue();
    auto *Address = getOrCreateAlloca(*MA);

    Val = getNewScalarValue(Val, R, Stmt, LTS, BBMap);
    Builder.CreateStore(Val, Address);
  }
}

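// Before the optimized region is entered, store the initial values of all
// demoted scalars: PHI values incoming from outside the region as well as
// scop-invariant base values that are read inside the region.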
void BlockGenerator::createScalarInitialization(Scop &S) {
  Region &R = S.getRegion();
  // The split block __just before__ the original and the optimized region.
  BasicBlock *SplitBB = R.getEnteringBlock();
  BranchInst *SplitBBTerm = cast<BranchInst>(SplitBB->getTerminator());
  assert(SplitBBTerm->getNumSuccessors() == 2 && "Bad region entering block!");

  // Get the start block of the __optimized__ region.
  BasicBlock *StartBB = SplitBBTerm->getSuccessor(0);
  if (StartBB == R.getEntry())
    StartBB = SplitBBTerm->getSuccessor(1);

  Builder.SetInsertPoint(StartBB->getTerminator());

  for (auto &Pair : S.arrays()) {
    auto &Array = Pair.second;
    if (Array->getNumberOfDimensions() != 0)
      continue;
    if (Array->isPHI()) {
      // For PHI nodes, the only values we need to store are the ones that
      // reach the PHI node from outside the region. In general there should
      // only be one such incoming edge and this edge should enter through
      // 'SplitBB'.
      auto PHI = cast<PHINode>(Array->getBasePtr());

      for (auto BI = PHI->block_begin(), BE = PHI->block_end(); BI != BE; BI++)
        if (!R.contains(*BI) && *BI != SplitBB)
          llvm_unreachable("Incoming edges from outside the scop should always "
                           "come from SplitBB");

      int Idx = PHI->getBasicBlockIndex(SplitBB);
      if (Idx < 0)
        continue;

      Value *ScalarValue = PHI->getIncomingValue(Idx);

      Builder.CreateStore(ScalarValue, getOrCreatePHIAlloca(PHI));
      continue;
    }

    auto *Inst = dyn_cast<Instruction>(Array->getBasePtr());

    if (Inst && R.contains(Inst))
      continue;

    // PHI nodes that are not marked as such in their SAI object are exit PHI
    // nodes we model as common scalars but do not need to initialize.
    if (Inst && isa<PHINode>(Inst))
      continue;

    Builder.CreateStore(Array->getBasePtr(),
                        getOrCreateScalarAlloca(Array->getBasePtr()));
  }
}

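// After the optimized and the original version of the region merge again,
// values that escape the scop are only available in their demoted allocas.
// Reload them in the optimized exit block and create merge PHIs selecting
// between the two versions, then rewire all escape users to those PHIs.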
void BlockGenerator::createScalarFinalization(Region &R) {
  // The exit block of the __unoptimized__ region.
  BasicBlock *ExitBB = R.getExitingBlock();
  // The merge block __just after__ the region and the optimized region.
  BasicBlock *MergeBB = R.getExit();

  // The exit block of the __optimized__ region.
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));

  Builder.SetInsertPoint(OptExitBB->getTerminator());
  for (const auto &EscapeMapping : EscapeMap) {
    // Extract the escaping instruction and the escaping users as well as the
    // alloca the instruction was demoted to.
    Instruction *EscapeInst = EscapeMapping.getFirst();
    const auto &EscapeMappingValue = EscapeMapping.getSecond();
    const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
    Value *ScalarAddr = EscapeMappingValue.first;

    // Reload the demoted instruction in the optimized version of the SCoP.
    Instruction *EscapeInstReload =
        Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload");

    // Create the merge PHI that merges the optimized and unoptimized version.
    PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
                                        EscapeInst->getName() + ".merge");
    MergePHI->insertBefore(MergeBB->getFirstInsertionPt());

    // Add the respective values to the merge PHI.
    MergePHI->addIncoming(EscapeInstReload, OptExitBB);
    MergePHI->addIncoming(EscapeInst, ExitBB);

    // The information of scalar evolution about the escaping instruction needs
    // to be revoked so the new merged instruction will be used.
    if (SE.isSCEVable(EscapeInst->getType()))
      SE.forgetValue(EscapeInst);

    // Replace all uses of the demoted instruction with the merge PHI.
    for (Instruction *EUser : EscapeUsers)
      EUser->replaceUsesOfWith(EscapeInst, MergePHI);
  }
}

void BlockGenerator::finalizeSCoP(Scop &S) {

  // Handle PHI nodes that were in the original exit and are now
  // moved into the region exiting block.
  if (!S.hasSingleExitEdge()) {
    for (Instruction &I : *S.getRegion().getExitingBlock()) {
      PHINode *PHI = dyn_cast<PHINode>(&I);
      if (!PHI)
        break;

      assert(PHI->getNumUses() == 1);
      assert(ScalarMap.count(PHI->user_back()));

      handleOutsideUsers(S.getRegion(), PHI, nullptr,
                         ScalarMap[PHI->user_back()]);
    }
  }

  createScalarInitialization(S);
  createScalarFinalization(S.getRegion());
}

VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           isl_map *Schedule)
    : BlockGenerator(BlockGen), VLTS(VLTS), Schedule(Schedule) {
  assert(Schedule && "No statement schedule provided");
}

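// Return a vector value for 'Old'. A value cached in VectorMap is reused
// directly; otherwise the per-lane scalar values are obtained through
// getNewValue() and combined into a vector with insertelement instructions.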
Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}

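// A stride-one (consecutive) access is implemented as a single wide load of
// the whole vector. For a stride of minus one the vector is loaded starting
// at the last accessed element and then reversed with a shufflevector.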
Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
    __isl_keep isl_id_to_ast_expr *NewAccesses, bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  auto *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = generateLocationAccessed(
      Stmt, Load, Pointer, ScalarMaps[Offset], VLTS[Offset], NewAccesses);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

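// A stride-zero access reads the same element in every lane. Load the
// element once and broadcast it with a zero-mask shufflevector splat.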
Value *VectorBlockGenerator::generateStrideZeroLoad(
    ScopStmt &Stmt, LoadInst *Load, ValueMapT &BBMap,
    __isl_keep isl_id_to_ast_expr *NewAccesses) {
  auto *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer = generateLocationAccessed(Stmt, Load, Pointer, BBMap,
                                               VLTS[0], NewAccesses);
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

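// For an unknown stride no wide load is possible: emit one scalar load per
// lane and assemble the result vector with insertelement instructions.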
Value *VectorBlockGenerator::generateUnknownStrideLoad(
    ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
    __isl_keep isl_id_to_ast_expr *NewAccesses) {
  int VectorWidth = getVectorWidth();
  auto *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = generateLocationAccessed(
        Stmt, Load, Pointer, ScalarMaps[i], VLTS[i], NewAccesses);
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(
    ScopStmt &Stmt, LoadInst *Load, ValueMapT &VectorMap,
    VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  if (Value *PreloadLoad = GlobalMap.lookup(Load)) {
    VectorMap[Load] = Builder.CreateVectorSplat(getVectorWidth(), PreloadLoad,
                                                Load->getName() + "_p");
    return;
  }

  if (!VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateScalarLoad(Stmt, Load, ScalarMaps[i], VLTS[i], NewAccesses);
    return;
  }

  const MemoryAccess &Access = Stmt.getAccessFor(Load);

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Load, VectorMap, ScalarMaps);

  Value *NewLoad;
  if (Access.isStrideZero(isl_map_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0], NewAccesses);
  else if (Access.isStrideOne(isl_map_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses);
  else if (Access.isStrideX(isl_map_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses, true);
  else
    NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps, NewAccesses);

  VectorMap[Load] = NewLoad;
}

void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt, UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForInst(Inst));

  assert(isa<CastInst>(Inst) && "Cannot generate vector code for instruction");

  const CastInst *Cast = cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
}

void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt, BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForInst(Inst);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(
    ScopStmt &Stmt, StoreInst *Store, ValueMapT &VectorMap,
    VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  const MemoryAccess &Access = Stmt.getAccessFor(Store);

  auto *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForInst(Store));

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Store, VectorMap, ScalarMaps);

  if (Access.isStrideOne(isl_map_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth());
    Value *NewPointer = generateLocationAccessed(
        Stmt, Store, Pointer, ScalarMaps[0], VLTS[0], NewAccesses);

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    // Use a distinct name to avoid shadowing the 'Store' parameter.
    StoreInst *VecStore = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      VecStore->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
      Value *NewPointer = generateLocationAccessed(
          Stmt, Store, Pointer, ScalarMaps[i], VLTS[i], NewAccesses);
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Value *Operand : Inst->operands())
    if (VectorMap.count(Operand))
      return true;
  return false;
}

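// For every operand of 'Inst' that is only available as a vector, extract
// the individual lanes into the per-lane scalar maps so that scalarized
// code can refer to them. Returns true if any vector operand was found.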
bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Value *Operand : Inst->operands()) {
    ValueMapT::iterator VecOp = VectorMap.find(Operand);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If one scalar element was extracted, all scalar elements have
      // already been extracted by the code here, so there is no need to
      // check for the existence of all of them.
      if (SM.count(Operand))
        break;

      SM[Operand] =
          Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(
    ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
    VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  int VectorWidth = getVectorWidth();
  bool HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < VectorWidth; VectorLane++)
    BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
                                    VLTS[VectorLane], NewAccesses);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

int VectorBlockGenerator::getVectorWidth() { return VLTS.size(); }

void VectorBlockGenerator::copyInstruction(
    ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
    VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  // Terminator instructions define the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion()))
    return;

  if (auto *Load = dyn_cast<LoadInst>(Inst)) {
    generateLoad(Stmt, Load, VectorMap, ScalarMaps, NewAccesses);
    return;
  }

  if (hasVectorOperands(Inst, VectorMap)) {
    if (auto *Store = dyn_cast<StoreInst>(Inst)) {
      copyStore(Stmt, Store, VectorMap, ScalarMaps, NewAccesses);
      return;
    }

    if (auto *Unary = dyn_cast<UnaryInstruction>(Inst)) {
      copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
      return;
    }

    if (auto *Binary = dyn_cast<BinaryOperator>(Inst)) {
      copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
      return;
    }

    // Fallthrough: we generate scalar instructions if we don't know how to
    // generate vector code.
  }

  copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps, NewAccesses);
}

void VectorBlockGenerator::copyStmt(
    ScopStmt &Stmt, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  assert(Stmt.isBlockStmt() && "TODO: Only block statements can be copied by "
                               "the vector block generator");

  BasicBlock *BB = Stmt.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported there is one map for scalar values
  // and one for vector values.
  //
  // In case we just do scalar code generation, the vectorMap is not used and
  // the scalarMap has just one dimension, which contains the mapping.
  //
  // In case vector code generation is done, an instruction may either appear
  // once in the vector map (as it computes <vectorwidth> values at a time)
  // or, if the values are calculated using scalar operations, once in every
  // dimension of the scalarMap.
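  //
  // For example, with a vector width of four, a vectorized addition yields
  // a single <4 x ...> entry in the vectorMap, whereas a scalarized
  // operation yields one scalar entry in each of the four scalarMaps.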
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap, NewAccesses);
}

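// Update the dominator tree for a newly copied block: its immediate
// dominator becomes the copy of the original block's immediate dominator,
// provided that copy already exists.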
BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
                                             BasicBlock *BBCopy) {

  BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
  BasicBlock *BBCopyIDom = BlockMap.lookup(BBIDom);

  if (BBCopyIDom)
    DT.changeImmediateDominator(BBCopy, BBCopyIDom);

  return BBCopyIDom;
}

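// Copy a non-affine region statement: the blocks of the region are visited
// in breadth-first order, copied individually, and re-connected afterwards,
// repairing dominance information and PHI nodes along the way.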
void RegionGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
                               isl_id_to_ast_expr *IdToAstExp) {
  assert(Stmt.isRegionStmt() &&
         "Only region statements can be copied by the region generator");

  // Forget all old mappings.
  BlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();

  // The region represented by the statement.
  Region *R = Stmt.getRegion();

  // Create a dedicated entry for the region where we can reload all demoted
  // inputs.
  BasicBlock *EntryBB = R->getEntry();
  BasicBlock *EntryBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
  Builder.SetInsertPoint(EntryBBCopy->begin());

  for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
    if (!R->contains(*PI))
      BlockMap[*PI] = EntryBBCopy;

  // Iterate over all blocks in the region in a breadth-first search.
  std::deque<BasicBlock *> Blocks;
  SmallPtrSet<BasicBlock *, 8> SeenBlocks;
  Blocks.push_back(EntryBB);
  SeenBlocks.insert(EntryBB);

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.front();
    Blocks.pop_front();

    // First split the block and update dominance information.
    BasicBlock *BBCopy = splitBB(BB);
    BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);

    // In order to remap PHI nodes we also store basic block mappings.
    BlockMap[BB] = BBCopy;

    // Get the mapping for this block and initialize it with the mapping
    // available at its immediate dominator (in the new region). Use lookup()
    // for the dominator's map: operator[] could insert a new entry and
    // thereby invalidate the reference into RegionMaps.
    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap = RegionMaps.lookup(BBCopyIDom);

    // Copy the block with the BlockGenerator.
    copyBB(Stmt, BB, BBCopy, RegionMap, LTS, IdToAstExp);

    // Add values to incomplete PHI nodes waiting for this block to be copied.
    for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
      addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB, LTS);
    IncompletePHINodeMap[BB].clear();

    // And continue with new successors inside the region.
    for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
      if (R->contains(*SI) && SeenBlocks.insert(*SI).second)
        Blocks.push_back(*SI);
  }

  // Now create a new dedicated region exit block and add it to the region map.
  BasicBlock *ExitBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
  BlockMap[R->getExit()] = ExitBBCopy;

  repairDominance(R->getExit(), ExitBBCopy);

  // As the block generator doesn't handle control flow we need to add the
  // region control flow by hand after all blocks have been copied.
  for (BasicBlock *BB : SeenBlocks) {

    BasicBlock *BBCopy = BlockMap[BB];
    TerminatorInst *TI = BB->getTerminator();
    if (isa<UnreachableInst>(TI)) {
      while (!BBCopy->empty())
        BBCopy->begin()->eraseFromParent();
      new UnreachableInst(BBCopy->getContext(), BBCopy);
      continue;
    }

    Instruction *BICopy = BBCopy->getTerminator();

    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap.insert(BlockMap.begin(), BlockMap.end());

    Builder.SetInsertPoint(BICopy);
    copyInstScalar(Stmt, TI, RegionMap, LTS);
    BICopy->eraseFromParent();
  }

  // Add counting PHI nodes to all loops in the region that can be used as
  // replacement for SCEVs referring to the old loop.
  for (BasicBlock *BB : SeenBlocks) {
    Loop *L = LI.getLoopFor(BB);
    if (L == nullptr || L->getHeader() != BB)
      continue;

    BasicBlock *BBCopy = BlockMap[BB];
    Value *NullVal = Builder.getInt32(0);
    PHINode *LoopPHI =
        PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
    Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
        LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
    LoopPHI->insertBefore(BBCopy->begin());
    LoopPHIInc->insertBefore(BBCopy->getTerminator());

    for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
      if (!R->contains(PredBB))
        continue;
      if (L->contains(PredBB))
        LoopPHI->addIncoming(LoopPHIInc, BlockMap[PredBB]);
      else
        LoopPHI->addIncoming(NullVal, BlockMap[PredBB]);
    }

    for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
      if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
        LoopPHI->addIncoming(NullVal, PredBBCopy);

    LTS[L] = SE.getUnknown(LoopPHI);
  }

  // Reset the old insert point for the builder.
  Builder.SetInsertPoint(ExitBBCopy->begin());
}

void RegionGenerator::generateScalarLoads(ScopStmt &Stmt,
                                          const Instruction *Inst,
                                          ValueMapT &BBMap) {

  // Inside a non-affine region PHI nodes are copied, not demoted. Once the
  // PHI is copied it will reload all inputs from outside the region, hence
  // we do not need to generate code for the read access of the operands of a
  // PHI.
  if (isa<PHINode>(Inst))
    return;

  return BlockGenerator::generateScalarLoads(Stmt, Inst, BBMap);
}

void RegionGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                           LoopToScevMapT &LTS,
                                           ValueMapT &BBMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.isRegionStmt() &&
         "Block statements need to use the generateScalarStores() "
         "function in the BlockGenerator");

  for (MemoryAccess *MA : Stmt) {

    if (MA->isExplicit() || MA->isRead())
      continue;

    Instruction *ScalarInst = MA->getAccessInstruction();

    // Only generate accesses that belong to this basic block.
    if (ScalarInst->getParent() != BB)
      continue;

    Value *Val = MA->getAccessValue();

    auto Address = getOrCreateAlloca(*MA);

    Val = getNewScalarValue(Val, R, Stmt, LTS, BBMap);
    Builder.CreateStore(Val, Address);
  }
}

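// Add the copied incoming value for 'IncomingBB' to the copied PHI node.
// If the incoming block has not been copied yet, the PHI is queued in
// IncompletePHINodeMap and completed once that block is generated.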
void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI,
                                      PHINode *PHICopy, BasicBlock *IncomingBB,
                                      LoopToScevMapT &LTS) {
  Region *StmtR = Stmt.getRegion();

  // If the incoming block was not yet copied, mark this PHI as incomplete.
  // Once the block is copied the incoming value will be added.
  BasicBlock *BBCopy = BlockMap[IncomingBB];
  if (!BBCopy) {
    assert(StmtR->contains(IncomingBB) &&
           "Bad incoming block for PHI in non-affine region");
    IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
    return;
  }

  Value *OpCopy = nullptr;
  if (StmtR->contains(IncomingBB)) {
    assert(RegionMaps.count(BBCopy) &&
           "Incoming PHI block did not have a BBMap");
    ValueMapT &BBCopyMap = RegionMaps[BBCopy];

    Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
    OpCopy = getNewValue(Stmt, Op, BBCopyMap, LTS, getLoopForInst(PHI));
  } else {

    if (PHICopy->getBasicBlockIndex(BBCopy) >= 0)
      return;

    Value *PHIOpAddr = getOrCreatePHIAlloca(const_cast<PHINode *>(PHI));
    OpCopy = new LoadInst(PHIOpAddr, PHIOpAddr->getName() + ".reload",
                          BlockMap[IncomingBB]->getTerminator());
  }

  assert(OpCopy && "Incoming PHI value was not copied properly");
  assert(BBCopy && "Incoming PHI block was not copied properly");
  PHICopy->addIncoming(OpCopy, BBCopy);
}

Value *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI,
                                           ValueMapT &BBMap,
                                           LoopToScevMapT &LTS) {
  unsigned NumIncoming = PHI->getNumIncomingValues();
  PHINode *PHICopy =
      Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
  PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
  BBMap[PHI] = PHICopy;

  for (unsigned u = 0; u < NumIncoming; u++)
    addOperandToPHI(Stmt, PHI, PHICopy, PHI->getIncomingBlock(u), LTS);
  return PHICopy;
}