//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslExprBuilder.h"
#include "polly/CodeGen/RuntimeDebugBuilder.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "isl/aff.h"
#include "isl/ast.h"
#include "isl/ast_build.h"
#include "isl/set.h"
#include <deque>

using namespace llvm;
using namespace polly;

static cl::opt<bool> Aligned("enable-polly-aligned",
                             cl::desc("Assume aligned memory accesses."),
                             cl::Hidden, cl::init(false), cl::ZeroOrMore,
                             cl::cat(PollyCategory));

bool PollyDebugPrinting;
static cl::opt<bool, true> DebugPrintingX(
    "polly-codegen-add-debug-printing",
    cl::desc("Add printf calls that show the values loaded/stored."),
    cl::location(PollyDebugPrinting), cl::Hidden, cl::init(false),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> TraceStmts(
    "polly-codegen-trace-stmts",
    cl::desc("Add printf calls that print the statement being executed"),
    cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> TraceScalars(
    "polly-codegen-trace-scalars",
    cl::desc("Add printf calls that print the values of all scalar values "
             "used in a statement. Requires -polly-codegen-trace-stmts."),
    cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

BlockGenerator::BlockGenerator(
    PollyIRBuilder &B, LoopInfo &LI, ScalarEvolution &SE, DominatorTree &DT,
    AllocaMapTy &ScalarMap, EscapeUsersAllocaMapTy &EscapeMap,
    ValueMapT &GlobalMap, IslExprBuilder *ExprBuilder, BasicBlock *StartBlock)
    : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
      EntryBB(nullptr), ScalarMap(ScalarMap), EscapeMap(EscapeMap),
      GlobalMap(GlobalMap), StartBlock(StartBlock) {}

Value *BlockGenerator::trySynthesizeNewValue(ScopStmt &Stmt, Value *Old,
                                             ValueMapT &BBMap,
                                             LoopToScevMapT &LTS,
                                             Loop *L) const {
  if (!SE.isSCEVable(Old->getType()))
    return nullptr;

  const SCEV *Scev = SE.getSCEVAtScope(Old, L);
  if (!Scev)
    return nullptr;

  if (isa<SCEVCouldNotCompute>(Scev))
    return nullptr;
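  // LTS maps each original loop to a SCEV describing the induction variable of
  // its generated counterpart; rewriting the SCEV in terms of these expresses
  // the old value in terms of the newly generated loop structure.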
  const SCEV *NewScev = SCEVLoopAddRecRewriter::rewrite(Scev, LTS, SE);
  ValueMapT VTV;
  VTV.insert(BBMap.begin(), BBMap.end());
  VTV.insert(GlobalMap.begin(), GlobalMap.end());

  Scop &S = *Stmt.getParent();
  const DataLayout &DL = S.getFunction().getParent()->getDataLayout();
  auto IP = Builder.GetInsertPoint();

  assert(IP != Builder.GetInsertBlock()->end() &&
         "Only instructions can be insert points for SCEVExpander");
  Value *Expanded =
      expandCodeFor(S, SE, DL, "polly", NewScev, Old->getType(), &*IP, &VTV,
                    StartBlock->getSinglePredecessor());

  BBMap[Old] = Expanded;
  return Expanded;
}

Value *BlockGenerator::getNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap,
                                   LoopToScevMapT &LTS, Loop *L) const {

  auto lookupGlobally = [this](Value *Old) -> Value * {
    Value *New = GlobalMap.lookup(Old);
    if (!New)
      return nullptr;

    // Required by:
    // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll
    // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll
    // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll
    // * Isl/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll
    // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
    // * Isl/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll
    // GlobalMap should be a mapping from (value in original SCoP) to (copied
    // value in generated SCoP), without intermediate mappings, which might
    // easily require transitiveness as well.
    if (Value *NewRemapped = GlobalMap.lookup(New))
      New = NewRemapped;

    // No test case for this code.
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  };

  Value *New = nullptr;
  auto VUse = VirtualUse::create(&Stmt, L, Old, true);
  switch (VUse.getKind()) {
  case VirtualUse::Block:
    // BasicBlocks are constants, but the BlockGenerator copies them.
    New = BBMap.lookup(Old);
    break;

  case VirtualUse::Constant:
    // Used by:
    // * Isl/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll
    // Constants should not be redefined. In this case, the GlobalMap just
    // contains a mapping to the same constant, which is unnecessary, but
    // harmless.
    if ((New = lookupGlobally(Old)))
      break;

    assert(!BBMap.count(Old));
    New = Old;
    break;

  case VirtualUse::ReadOnly:
    assert(!GlobalMap.count(Old));

    // Required for:
    // * Isl/CodeGen/MemAccess/create_arrays.ll
    // * Isl/CodeGen/read-only-scalars.ll
    // * ScheduleOptimizer/pattern-matching-based-opts_10.ll
    // For some reason these reload a read-only value. The reloaded value ends
    // up in BBMap, but its value should be identical.
    //
    // Required for:
    // * Isl/CodeGen/OpenMP/single_loop_with_param.ll
    // The parallel subfunctions need to reference the read-only value from the
    // parent function; this is done by reloading them locally.
    if ((New = BBMap.lookup(Old)))
      break;

    New = Old;
    break;

  case VirtualUse::Synthesizable:
    // Used by:
    // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
    // * Isl/CodeGen/OpenMP/recomputed-srem.ll
    // * Isl/CodeGen/OpenMP/reference-other-bb.ll
    // * Isl/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll
    // For some reason synthesizable values end up in GlobalMap. Their values
    // are the same as trySynthesizeNewValue would return. The legacy
    // implementation prioritized GlobalMap, so this is what we do here as well.
    // Ideally, synthesizable values should not end up in GlobalMap.
    if ((New = lookupGlobally(Old)))
      break;

    // Required for:
    // * Isl/CodeGen/RuntimeDebugBuilder/combine_different_values.ll
    // * Isl/CodeGen/getNumberOfIterations.ll
    // * Isl/CodeGen/non_affine_float_compare.ll
    // * ScheduleOptimizer/pattern-matching-based-opts_10.ll
    // Ideally, synthesizable values are synthesized by trySynthesizeNewValue,
    // not precomputed (SCEVExpander has its own caching mechanism).
    // These tests fail without this, but I think trySynthesizeNewValue would
    // just re-synthesize the same instructions.
    if ((New = BBMap.lookup(Old)))
      break;

    New = trySynthesizeNewValue(Stmt, Old, BBMap, LTS, L);
    break;

  case VirtualUse::Hoisted:
    // TODO: Hoisted invariant loads should be found in GlobalMap only, but not
    // redefined locally (which will be ignored anyway). That is, the following
    // assertion should apply: assert(!BBMap.count(Old))

    New = lookupGlobally(Old);
    break;

  case VirtualUse::Intra:
  case VirtualUse::Inter:
    assert(!GlobalMap.count(Old) &&
           "Intra- and inter-statement values are never global");
    New = BBMap.lookup(Old);
    break;
  }
  assert(New && "Unexpected scalar dependence in region!");
  return New;
}

void BlockGenerator::copyInstScalar(ScopStmt &Stmt, Instruction *Inst,
                                    ValueMapT &BBMap, LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. In their current state, they just crash code
  // generation because their metadata operands are not correctly copied.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();

  // Replace old operands with the new ones.
  for (Value *OldOperand : Inst->operands()) {
    Value *NewOperand =
        getNewValue(Stmt, OldOperand, BBMap, LTS, getLoopForStmt(Stmt));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      NewInst->deleteValue();
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  // When copying the instruction onto the Module meant for the GPU,
  // debug metadata attached to an instruction causes all related
  // metadata to be pulled into the Module. This includes the DICompileUnit,
  // which will not be listed in llvm.dbg.cu of the Module since the Module
  // doesn't contain one. This fails the verification of the Module and the
  // subsequent generation of the ASM string.
  if (NewInst->getModule() != Inst->getModule())
    NewInst->setDebugLoc(llvm::DebugLoc());

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

Value *
BlockGenerator::generateLocationAccessed(ScopStmt &Stmt, MemAccInst Inst,
                                         ValueMapT &BBMap, LoopToScevMapT &LTS,
                                         isl_id_to_ast_expr *NewAccesses) {
  const MemoryAccess &MA = Stmt.getArrayAccessFor(Inst);
  return generateLocationAccessed(
      Stmt, getLoopForStmt(Stmt),
      Inst.isNull() ? nullptr : Inst.getPointerOperand(), BBMap, LTS,
      NewAccesses, MA.getId().release(), MA.getAccessValue()->getType());
}

Value *BlockGenerator::generateLocationAccessed(
    ScopStmt &Stmt, Loop *L, Value *Pointer, ValueMapT &BBMap,
    LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses, __isl_take isl_id *Id,
    Type *ExpectedType) {
  isl_ast_expr *AccessExpr = isl_id_to_ast_expr_get(NewAccesses, Id);

  if (AccessExpr) {
    AccessExpr = isl_ast_expr_address_of(AccessExpr);
    auto Address = ExprBuilder->create(AccessExpr);

    // Cast the address of this memory access to a pointer type that has the
    // same element type as the original access, but uses the address space of
    // the newly generated pointer.
    auto OldPtrTy = ExpectedType->getPointerTo();
    auto NewPtrTy = Address->getType();
    OldPtrTy = PointerType::get(OldPtrTy->getElementType(),
                                NewPtrTy->getPointerAddressSpace());

    if (OldPtrTy != NewPtrTy)
      Address = Builder.CreateBitOrPointerCast(Address, OldPtrTy);
    return Address;
  }
  assert(
      Pointer &&
      "If no access expression was generated, the original pointer must be used");
  return getNewValue(Stmt, Pointer, BBMap, LTS, L);
}

Value *
BlockGenerator::getImplicitAddress(MemoryAccess &Access, Loop *L,
                                   LoopToScevMapT &LTS, ValueMapT &BBMap,
                                   __isl_keep isl_id_to_ast_expr *NewAccesses) {
  if (Access.isLatestArrayKind())
    return generateLocationAccessed(*Access.getStatement(), L, nullptr, BBMap,
                                    LTS, NewAccesses, Access.getId().release(),
                                    Access.getAccessValue()->getType());

  return getOrCreateAlloca(Access);
}

Loop *BlockGenerator::getLoopForStmt(const ScopStmt &Stmt) const {
  auto *StmtBB = Stmt.getEntryBlock();
  return LI.getLoopFor(StmtBB);
}

Value *BlockGenerator::generateArrayLoad(ScopStmt &Stmt, LoadInst *Load,
                                         ValueMapT &BBMap, LoopToScevMapT &LTS,
                                         isl_id_to_ast_expr *NewAccesses) {
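  // A load hoisted out of the SCoP by invariant load hoisting has already been
  // materialized once before the SCoP; GlobalMap then points at the preloaded
  // value, which is reused instead of emitting a new load.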
  if (Value *PreloadLoad = GlobalMap.lookup(Load))
    return PreloadLoad;

  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, BBMap, LTS, NewAccesses);
  Value *ScalarLoad = Builder.CreateAlignedLoad(
      NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_");

  if (PollyDebugPrinting)
    RuntimeDebugBuilder::createCPUPrinter(Builder, "Load from ", NewPointer,
                                          ": ", ScalarLoad, "\n");

  return ScalarLoad;
}

void BlockGenerator::generateArrayStore(ScopStmt &Stmt, StoreInst *Store,
                                        ValueMapT &BBMap, LoopToScevMapT &LTS,
                                        isl_id_to_ast_expr *NewAccesses) {
  MemoryAccess &MA = Stmt.getArrayAccessFor(Store);
  isl::set AccDom = MA.getAccessRelation().domain();
  std::string Subject = MA.getId().get_name();

  generateConditionalExecution(Stmt, AccDom, Subject.c_str(), [&, this]() {
    Value *NewPointer =
        generateLocationAccessed(Stmt, Store, BBMap, LTS, NewAccesses);
    Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
                                      LTS, getLoopForStmt(Stmt));

    if (PollyDebugPrinting)
      RuntimeDebugBuilder::createCPUPrinter(Builder, "Store to  ", NewPointer,
                                            ": ", ValueOperand, "\n");

    Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlignment());
  });
}

bool BlockGenerator::canSyntheziseInStmt(ScopStmt &Stmt, Instruction *Inst) {
  Loop *L = getLoopForStmt(Stmt);
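  // Synthesis from a single SCEV is only meaningful if the value is invariant
  // within one statement instance; for a region statement, a loop contained in
  // the region may produce multiple values per instance, so it is excluded.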
  return (Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
         canSynthesize(Inst, *Stmt.getParent(), &SE, L);
}

void BlockGenerator::copyInstruction(ScopStmt &Stmt, Instruction *Inst,
                                     ValueMapT &BBMap, LoopToScevMapT &LTS,
                                     isl_id_to_ast_expr *NewAccesses) {
  // Terminator instructions define the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  // Synthesizable instructions will be generated on demand.
  if (canSyntheziseInStmt(Stmt, Inst))
    return;

  if (auto *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateArrayLoad(Stmt, Load, BBMap, LTS, NewAccesses);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (auto *Store = dyn_cast<StoreInst>(Inst)) {
    // Identified as redundant by -polly-simplify.
    if (!Stmt.getArrayAccessOrNULLFor(Store))
      return;

    generateArrayStore(Stmt, Store, BBMap, LTS, NewAccesses);
    return;
  }

  if (auto *PHI = dyn_cast<PHINode>(Inst)) {
    copyPHIInstruction(Stmt, PHI, BBMap, LTS);
    return;
  }

  // Skip some special intrinsics for which we do not adjust the semantics to
  // the new schedule. All others are handled like every other instruction.
  if (isIgnoredIntrinsic(Inst))
    return;

  copyInstScalar(Stmt, Inst, BBMap, LTS);
}

void BlockGenerator::removeDeadInstructions(BasicBlock *BB, ValueMapT &BBMap) {
  auto NewBB = Builder.GetInsertBlock();
  for (auto I = NewBB->rbegin(); I != NewBB->rend(); I++) {
    Instruction *NewInst = &*I;

    if (!isInstructionTriviallyDead(NewInst))
      continue;

    for (auto Pair : BBMap)
      if (Pair.second == NewInst) {
        BBMap.erase(Pair.first);
      }

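    // Erasing invalidates the reverse iterator, so restart the scan from the
    // (new) last instruction. Instructions that became trivially dead through
    // this erasure are picked up by a later iteration of the scan.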
    NewInst->eraseFromParent();
    I = NewBB->rbegin();
  }
}

void BlockGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
                              isl_id_to_ast_expr *NewAccesses) {
  assert(Stmt.isBlockStmt() &&
         "Only block statements can be copied by the block generator");

  ValueMapT BBMap;

  BasicBlock *BB = Stmt.getBasicBlock();
  copyBB(Stmt, BB, BBMap, LTS, NewAccesses);
  removeDeadInstructions(BB, BBMap);
}

BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
  BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(),
                                  &*Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  return CopyBB;
}

BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
                                   ValueMapT &BBMap, LoopToScevMapT &LTS,
                                   isl_id_to_ast_expr *NewAccesses) {
  BasicBlock *CopyBB = splitBB(BB);
  Builder.SetInsertPoint(&CopyBB->front());
  generateScalarLoads(Stmt, LTS, BBMap, NewAccesses);
  generateBeginStmtTrace(Stmt, LTS, BBMap);

  copyBB(Stmt, BB, CopyBB, BBMap, LTS, NewAccesses);

  // After the basic block has been copied, store all scalars that escape it
  // to their allocas.
  generateScalarStores(Stmt, LTS, BBMap, NewAccesses);
  return CopyBB;
}

void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
                            ValueMapT &BBMap, LoopToScevMapT &LTS,
                            isl_id_to_ast_expr *NewAccesses) {
  EntryBB = &CopyBB->getParent()->getEntryBlock();

  // Block statements and the entry blocks of region statements are code
  // generated from instruction lists. This allows us to optimize the
  // instructions that belong to a certain scop statement. As the code
  // structure of region statements might be arbitrarily complex, optimizing
  // the instruction list is not yet supported.
  if (Stmt.isBlockStmt() || (Stmt.isRegionStmt() && Stmt.getEntryBlock() == BB))
    for (Instruction *Inst : Stmt.getInstructions())
      copyInstruction(Stmt, Inst, BBMap, LTS, NewAccesses);
  else
    for (Instruction &Inst : *BB)
      copyInstruction(Stmt, &Inst, BBMap, LTS, NewAccesses);
}

Value *BlockGenerator::getOrCreateAlloca(const MemoryAccess &Access) {
  assert(!Access.isLatestArrayKind() && "Trying to get alloca for array kind");

  return getOrCreateAlloca(Access.getLatestScopArrayInfo());
}

Value *BlockGenerator::getOrCreateAlloca(const ScopArrayInfo *Array) {
  assert(!Array->isArrayKind() && "Trying to get alloca for array kind");

  auto &Addr = ScalarMap[Array];

  if (Addr) {
    // Allow allocas to be (temporarily) redirected once by adding a new
    // old-alloca-addr to new-addr mapping to GlobalMap. This functionality
    // is used, for example, by the OpenMP code generation, where a first use
    // of a scalar while still in the host code allocates a normal alloca with
    // getOrCreateAlloca. When the values of this scalar are accessed during
    // the generation of the parallel subfunction, these values are copied over
    // to the parallel subfunction and each request for a scalar alloca slot
    // must be forwarded to the temporary in-subfunction slot. This mapping is
    // removed when the subfunction has been generated and again normal host
    // code is generated. Due to the following reasons it is not possible to
    // perform the GlobalMap lookup right after creating the alloca below, but
    // instead we need to check GlobalMap at each call to getOrCreateAlloca:
    //
    //   1) GlobalMap may be changed multiple times (for each parallel loop),
    //   2) The temporary mapping is commonly only known after the initial
    //      alloca has already been generated, and
    //   3) The original alloca value must be restored after leaving the
    //      sub-function.
    if (Value *NewAddr = GlobalMap.lookup(&*Addr))
      return NewAddr;
    return Addr;
  }

  Type *Ty = Array->getElementType();
  Value *ScalarBase = Array->getBasePtr();
  std::string NameExt;
  if (Array->isPHIKind())
    NameExt = ".phiops";
  else
    NameExt = ".s2a";

  const DataLayout &DL = Builder.GetInsertBlock()->getModule()->getDataLayout();

  Addr = new AllocaInst(Ty, DL.getAllocaAddrSpace(),
                        ScalarBase->getName() + NameExt);
  EntryBB = &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  Addr->insertBefore(&*EntryBB->getFirstInsertionPt());

  return Addr;
}

void BlockGenerator::handleOutsideUsers(const Scop &S, ScopArrayInfo *Array) {
  Instruction *Inst = cast<Instruction>(Array->getBasePtr());

  // If there are escape users, we get the alloca for this instruction and put
  // it in the EscapeMap for later finalization. If the instruction was copied
  // multiple times, we already did this and can exit early.
  if (EscapeMap.count(Inst))
    return;

  EscapeUserVectorTy EscapeUsers;
  for (User *U : Inst->users()) {

    // Non-instruction users will never escape.
    Instruction *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;

    if (S.contains(UI))
      continue;

    EscapeUsers.push_back(UI);
  }

  // Exit if no escape uses were found.
  if (EscapeUsers.empty())
    return;

  // Get or create an escape alloca for this instruction.
  auto *ScalarAddr = getOrCreateAlloca(Array);

  // Remember that this instruction has escape uses and the escape alloca.
  EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));
}

void BlockGenerator::generateScalarLoads(
    ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
    __isl_keep isl_id_to_ast_expr *NewAccesses) {
  for (MemoryAccess *MA : Stmt) {
    if (MA->isOriginalArrayKind() || MA->isWrite())
      continue;

#ifndef NDEBUG
    auto StmtDom =
        Stmt.getDomain().intersect_params(Stmt.getParent()->getContext());
    auto AccDom = MA->getAccessRelation().domain();
    assert(!StmtDom.is_subset(AccDom).is_false() &&
           "Scalar must be loaded in all statement instances");
#endif

    auto *Address =
        getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS, BBMap, NewAccesses);
    assert((!isa<Instruction>(Address) ||
            DT.dominates(cast<Instruction>(Address)->getParent(),
                         Builder.GetInsertBlock())) &&
           "Domination violation");
    BBMap[MA->getAccessValue()] =
        Builder.CreateLoad(Address, Address->getName() + ".reload");
  }
}

Value *BlockGenerator::buildContainsCondition(ScopStmt &Stmt,
                                              const isl::set &Subdomain) {
  isl::ast_build AstBuild = Stmt.getAstBuild();
  isl::set Domain = Stmt.getDomain();

  isl::union_map USchedule = AstBuild.get_schedule();
  USchedule = USchedule.intersect_domain(Domain);

  assert(!USchedule.is_empty());
  isl::map Schedule = isl::map::from_union_map(USchedule);

  isl::set ScheduledDomain = Schedule.range();
  isl::set ScheduledSet = Subdomain.apply(Schedule);

  isl::ast_build RestrictedBuild = AstBuild.restrict(ScheduledDomain);

  isl::ast_expr IsInSet = RestrictedBuild.expr_from(ScheduledSet);
  Value *IsInSetExpr = ExprBuilder->create(IsInSet.copy());
  IsInSetExpr = Builder.CreateICmpNE(
      IsInSetExpr, ConstantInt::get(IsInSetExpr->getType(), 0));

  return IsInSetExpr;
}

void BlockGenerator::generateConditionalExecution(
    ScopStmt &Stmt, const isl::set &Subdomain, StringRef Subject,
    const std::function<void()> &GenThenFunc) {
  isl::set StmtDom = Stmt.getDomain();

  // If the condition is a tautology, don't generate a condition around the
  // code.
  bool IsPartialWrite =
      !StmtDom.intersect_params(Stmt.getParent()->getContext())
           .is_subset(Subdomain);
  if (!IsPartialWrite) {
    GenThenFunc();
    return;
  }

  // Generate the condition.
  Value *Cond = buildContainsCondition(Stmt, Subdomain);

  // Don't call GenThenFunc if it is never executed. An ast index expression
  // might not be defined in this case.
  if (auto *Const = dyn_cast<ConstantInt>(Cond))
    if (Const->isZero())
      return;

  BasicBlock *HeadBlock = Builder.GetInsertBlock();
  StringRef BlockName = HeadBlock->getName();

  // Generate the conditional block.
  SplitBlockAndInsertIfThen(Cond, &*Builder.GetInsertPoint(), false, nullptr,
                            &DT, &LI);
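  // The split leaves the CFG in the following shape, with the new conditional
  // branch terminating the former insert block:
  //
  //   HeadBlock:  ...; br i1 %cond, label %ThenBlock, label %TailBlock
  //   ThenBlock:  br label %TailBlock
  //   TailBlock:  ...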
  BranchInst *Branch = cast<BranchInst>(HeadBlock->getTerminator());
  BasicBlock *ThenBlock = Branch->getSuccessor(0);
  BasicBlock *TailBlock = Branch->getSuccessor(1);

  // Assign descriptive names.
  if (auto *CondInst = dyn_cast<Instruction>(Cond))
    CondInst->setName("polly." + Subject + ".cond");
  ThenBlock->setName(BlockName + "." + Subject + ".partial");
  TailBlock->setName(BlockName + ".cont");

  // Put the client code into the conditional block and continue in the merge
  // block afterwards.
  Builder.SetInsertPoint(ThenBlock, ThenBlock->getFirstInsertionPt());
  GenThenFunc();
  Builder.SetInsertPoint(TailBlock, TailBlock->getFirstInsertionPt());
}

static std::string getInstName(Value *Val) {
  std::string Result;
  raw_string_ostream OS(Result);
  Val->printAsOperand(OS, false);
  return OS.str();
}

void BlockGenerator::generateBeginStmtTrace(ScopStmt &Stmt, LoopToScevMapT &LTS,
                                            ValueMapT &BBMap) {
  if (!TraceStmts)
    return;

  Scop *S = Stmt.getParent();
  const char *BaseName = Stmt.getBaseName();

  isl::ast_build AstBuild = Stmt.getAstBuild();
  isl::set Domain = Stmt.getDomain();

  isl::union_map USchedule = AstBuild.get_schedule().intersect_domain(Domain);
  isl::map Schedule = isl::map::from_union_map(USchedule);
  assert(Schedule.is_empty().is_false() &&
         "The stmt must have a valid instance");

  isl::multi_pw_aff ScheduleMultiPwAff =
      isl::pw_multi_aff::from_map(Schedule.reverse());
  isl::ast_build RestrictedBuild = AstBuild.restrict(Schedule.range());

  // Sequence of strings to print.
  SmallVector<llvm::Value *, 8> Values;
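  // The printf emitted below produces one line per executed statement
  // instance, e.g. (hypothetical statement and scalar names, with
  // -polly-codegen-trace-scalars enabled): Stmt_bb9(0,3) %add=42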

  // Print the name of the statement.
  // TODO: Indent by the depth of the statement instance in the schedule tree.
  Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, BaseName));
  Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "("));

  // Add the coordinate of the statement instance.
  int DomDims = ScheduleMultiPwAff.dim(isl::dim::out);
  for (int i = 0; i < DomDims; i += 1) {
    if (i > 0)
      Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ","));

    isl::ast_expr IsInSet =
        RestrictedBuild.expr_from(ScheduleMultiPwAff.get_pw_aff(i));
    Values.push_back(ExprBuilder->create(IsInSet.copy()));
  }

  if (TraceScalars) {
    Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")"));
    DenseSet<Instruction *> Encountered;

    // Add the value of each scalar (and the result of PHIs) used in the
    // statement.
    // TODO: Values used in region-statements.
    for (Instruction *Inst : Stmt.insts()) {
      if (!RuntimeDebugBuilder::isPrintable(Inst->getType()))
        continue;

      if (isa<PHINode>(Inst)) {
        Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, " "));
        Values.push_back(RuntimeDebugBuilder::getPrintableString(
            Builder, getInstName(Inst)));
        Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "="));
        Values.push_back(getNewValue(Stmt, Inst, BBMap, LTS,
                                     LI.getLoopFor(Inst->getParent())));
      } else {
        for (Value *Op : Inst->operand_values()) {
          // Do not print values that cannot change during the execution of the
          // SCoP.
          auto *OpInst = dyn_cast<Instruction>(Op);
          if (!OpInst)
            continue;
          if (!S->contains(OpInst))
            continue;

          // Print each scalar at most once, and exclude values defined in the
          // statement itself.
          if (Encountered.count(OpInst))
            continue;

          Values.push_back(
              RuntimeDebugBuilder::getPrintableString(Builder, " "));
          Values.push_back(RuntimeDebugBuilder::getPrintableString(
              Builder, getInstName(OpInst)));
          Values.push_back(
              RuntimeDebugBuilder::getPrintableString(Builder, "="));
          Values.push_back(getNewValue(Stmt, OpInst, BBMap, LTS,
                                       LI.getLoopFor(Inst->getParent())));
          Encountered.insert(OpInst);
        }
      }

      Encountered.insert(Inst);
    }

    Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "\n"));
  } else {
    Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")\n"));
  }

  RuntimeDebugBuilder::createCPUPrinter(Builder, ArrayRef<Value *>(Values));
}

void BlockGenerator::generateScalarStores(
    ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
    __isl_keep isl_id_to_ast_expr *NewAccesses) {
  Loop *L = LI.getLoopFor(Stmt.getBasicBlock());

  assert(Stmt.isBlockStmt() &&
         "Region statements need to use the generateScalarStores() function in "
         "the RegionGenerator");

  for (MemoryAccess *MA : Stmt) {
    if (MA->isOriginalArrayKind() || MA->isRead())
      continue;

    isl::set AccDom = MA->getAccessRelation().domain();
    std::string Subject = MA->getId().get_name();

    generateConditionalExecution(
        Stmt, AccDom, Subject.c_str(), [&, this, MA]() {
          Value *Val = MA->getAccessValue();
          if (MA->isAnyPHIKind()) {
            assert(MA->getIncoming().size() >= 1 &&
                   "Block statements have exactly one exiting block, or "
                   "multiple with the same incoming block and value");
            assert(std::all_of(MA->getIncoming().begin(),
                               MA->getIncoming().end(),
                               [&](std::pair<BasicBlock *, Value *> p) -> bool {
                                 return p.first == Stmt.getBasicBlock();
                               }) &&
                   "Incoming block must be statement's block");
            Val = MA->getIncoming()[0].second;
          }
          auto Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS,
                                            BBMap, NewAccesses);

          Val = getNewValue(Stmt, Val, BBMap, LTS, L);
          assert((!isa<Instruction>(Val) ||
                  DT.dominates(cast<Instruction>(Val)->getParent(),
                               Builder.GetInsertBlock())) &&
                 "Domination violation");
          assert((!isa<Instruction>(Address) ||
                  DT.dominates(cast<Instruction>(Address)->getParent(),
                               Builder.GetInsertBlock())) &&
                 "Domination violation");

          // The new Val might have a different type than the old Val due to
          // ScalarEvolution looking through bitcasts.
          if (Val->getType() != Address->getType()->getPointerElementType())
            Address = Builder.CreateBitOrPointerCast(
                Address, Val->getType()->getPointerTo());

          Builder.CreateStore(Val, Address);
        });
  }
}

void BlockGenerator::createScalarInitialization(Scop &S) {
  BasicBlock *ExitBB = S.getExit();
  BasicBlock *PreEntryBB = S.getEnteringBlock();

  Builder.SetInsertPoint(&*StartBlock->begin());

  for (auto &Array : S.arrays()) {
    if (Array->getNumberOfDimensions() != 0)
      continue;
    if (Array->isPHIKind()) {
      // For PHI nodes, the only values we need to store are the ones that
      // reach the PHI node from outside the region. In general there should
      // only be one such incoming edge and this edge should enter through
      // 'PreEntryBB'.
      auto PHI = cast<PHINode>(Array->getBasePtr());

      for (auto BI = PHI->block_begin(), BE = PHI->block_end(); BI != BE; BI++)
        if (!S.contains(*BI) && *BI != PreEntryBB)
          llvm_unreachable("Incoming edges from outside the scop should always "
                           "come from PreEntryBB");

      int Idx = PHI->getBasicBlockIndex(PreEntryBB);
      if (Idx < 0)
        continue;

      Value *ScalarValue = PHI->getIncomingValue(Idx);

      Builder.CreateStore(ScalarValue, getOrCreateAlloca(Array));
      continue;
    }

    auto *Inst = dyn_cast<Instruction>(Array->getBasePtr());

    if (Inst && S.contains(Inst))
      continue;

    // PHI nodes that are not marked as such in their SAI object are either exit
    // PHI nodes we model as common scalars but without initialization, or
    // incoming phi nodes that need to be initialized. Check if the former is
    // the case for Inst and do not create and initialize memory if so.
    if (auto *PHI = dyn_cast_or_null<PHINode>(Inst))
      if (!S.hasSingleExitEdge() && PHI->getBasicBlockIndex(ExitBB) >= 0)
        continue;

    Builder.CreateStore(Array->getBasePtr(), getOrCreateAlloca(Array));
  }
}

void BlockGenerator::createScalarFinalization(Scop &S) {
  // The exit block of the __unoptimized__ region.
  BasicBlock *ExitBB = S.getExitingBlock();
  // The merge block __just after__ the region and the optimized region.
  BasicBlock *MergeBB = S.getExit();

  // The exit block of the __optimized__ region.
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));
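  // MergeBB is assumed to have exactly two predecessors: ExitBB from the
  // original code and OptExitBB from the generated code. Each escaping value
  // is merged below by a two-way PHI placed in MergeBB.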

  Builder.SetInsertPoint(OptExitBB->getTerminator());
  for (const auto &EscapeMapping : EscapeMap) {
    // Extract the escaping instruction and the escaping users as well as the
    // alloca the instruction was demoted to.
    Instruction *EscapeInst = EscapeMapping.first;
    const auto &EscapeMappingValue = EscapeMapping.second;
    const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
    Value *ScalarAddr = EscapeMappingValue.first;

    // Reload the demoted instruction in the optimized version of the SCoP.
    Value *EscapeInstReload =
        Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload");
    EscapeInstReload =
        Builder.CreateBitOrPointerCast(EscapeInstReload, EscapeInst->getType());

    // Create the merge PHI that merges the optimized and unoptimized version.
    PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
                                        EscapeInst->getName() + ".merge");
    MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt());

    // Add the respective values to the merge PHI.
    MergePHI->addIncoming(EscapeInstReload, OptExitBB);
    MergePHI->addIncoming(EscapeInst, ExitBB);

    // ScalarEvolution's information about the escaping instruction needs to
    // be revoked so that the new merge PHI will be used.
    if (SE.isSCEVable(EscapeInst->getType()))
      SE.forgetValue(EscapeInst);

    // Replace all uses of the demoted instruction with the merge PHI.
    for (Instruction *EUser : EscapeUsers)
      EUser->replaceUsesOfWith(EscapeInst, MergePHI);
  }
}

void BlockGenerator::findOutsideUsers(Scop &S) {
  for (auto &Array : S.arrays()) {

    if (Array->getNumberOfDimensions() != 0)
      continue;

    if (Array->isPHIKind())
      continue;

    auto *Inst = dyn_cast<Instruction>(Array->getBasePtr());

    if (!Inst)
      continue;

    // Scop invariant hoisting moves some of the base pointers out of the scop.
    // We can ignore these, as the invariant load hoisting already registers the
    // relevant outside users.
    if (!S.contains(Inst))
      continue;

    handleOutsideUsers(S, Array);
  }
}

void BlockGenerator::createExitPHINodeMerges(Scop &S) {
  if (S.hasSingleExitEdge())
    return;

  auto *ExitBB = S.getExitingBlock();
  auto *MergeBB = S.getExit();
  auto *AfterMergeBB = MergeBB->getSingleSuccessor();
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));

  Builder.SetInsertPoint(OptExitBB->getTerminator());

  for (auto &SAI : S.arrays()) {
    auto *Val = SAI->getBasePtr();

    // Only Value-like scalars need a merge PHI. Exit block PHIs receive either
    // the original PHI's value or the reloaded incoming values from the
    // generated code. An llvm::Value is merged between the original code's
    // value and the generated one.
    if (!SAI->isExitPHIKind())
      continue;

    PHINode *PHI = dyn_cast<PHINode>(Val);
    if (!PHI)
      continue;

    if (PHI->getParent() != AfterMergeBB)
      continue;

    std::string Name = PHI->getName();
    Value *ScalarAddr = getOrCreateAlloca(SAI);
    Value *Reload = Builder.CreateLoad(ScalarAddr, Name + ".ph.final_reload");
    Reload = Builder.CreateBitOrPointerCast(Reload, PHI->getType());
    Value *OriginalValue = PHI->getIncomingValueForBlock(MergeBB);
    assert((!isa<Instruction>(OriginalValue) ||
            cast<Instruction>(OriginalValue)->getParent() != MergeBB) &&
           "Original value must not be one we just generated.");
    auto *MergePHI = PHINode::Create(PHI->getType(), 2, Name + ".ph.merge");
    MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt());
    MergePHI->addIncoming(Reload, OptExitBB);
    MergePHI->addIncoming(OriginalValue, ExitBB);
    int Idx = PHI->getBasicBlockIndex(MergeBB);
    PHI->setIncomingValue(Idx, MergePHI);
  }
}

void BlockGenerator::invalidateScalarEvolution(Scop &S) {
  for (auto &Stmt : S)
    if (Stmt.isCopyStmt())
      continue;
    else if (Stmt.isBlockStmt())
      for (auto &Inst : *Stmt.getBasicBlock())
        SE.forgetValue(&Inst);
    else if (Stmt.isRegionStmt())
      for (auto *BB : Stmt.getRegion()->blocks())
        for (auto &Inst : *BB)
          SE.forgetValue(&Inst);
    else
      llvm_unreachable("Unexpected statement type found");

  // Invalidate SCEV of loops surrounding the EscapeUsers.
  for (const auto &EscapeMapping : EscapeMap) {
    const EscapeUserVectorTy &EscapeUsers = EscapeMapping.second.second;
    for (Instruction *EUser : EscapeUsers) {
      if (Loop *L = LI.getLoopFor(EUser->getParent()))
        while (L) {
          SE.forgetLoop(L);
          L = L->getParentLoop();
        }
    }
  }
}

void BlockGenerator::finalizeSCoP(Scop &S) {
  findOutsideUsers(S);
  createScalarInitialization(S);
  createExitPHINodeMerges(S);
  createScalarFinalization(S);
  invalidateScalarEvolution(S);
}

VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           isl_map *Schedule)
    : BlockGenerator(BlockGen), VLTS(VLTS), Schedule(Schedule) {
  assert(Schedule && "No statement schedule provided");
}

Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}

Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
    __isl_keep isl_id_to_ast_expr *NewAccesses, bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  auto *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[Offset],
                                               VLTS[Offset], NewAccesses);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
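    // Reverse the loaded vector with a shuffle; e.g., for VectorWidth == 4 the
    // mask is <3, 2, 1, 0>, so the highest-address element (the one accessed
    // by the first iteration of the negative-stride access) ends up in lane 0.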
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

Value *VectorBlockGenerator::generateStrideZeroLoad(
    ScopStmt &Stmt, LoadInst *Load, ValueMapT &BBMap,
    __isl_keep isl_id_to_ast_expr *NewAccesses) {
  auto *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, BBMap, VLTS[0], NewAccesses);
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

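  // An all-zero shuffle mask broadcasts lane 0 of the single-element load into
  // every lane of the result vector.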
  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

Value *VectorBlockGenerator::generateUnknownStrideLoad(
    ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
    __isl_keep isl_id_to_ast_expr *NewAccesses) {
  int VectorWidth = getVectorWidth();
  auto *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      dyn_cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[i],
                                                 VLTS[i], NewAccesses);
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(
    ScopStmt &Stmt, LoadInst *Load, ValueMapT &VectorMap,
    VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  if (Value *PreloadLoad = GlobalMap.lookup(Load)) {
    VectorMap[Load] = Builder.CreateVectorSplat(getVectorWidth(), PreloadLoad,
                                                Load->getName() + "_p");
    return;
  }

  if (!VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateArrayLoad(Stmt, Load, ScalarMaps[i], VLTS[i], NewAccesses);
    return;
  }

  const MemoryAccess &Access = Stmt.getArrayAccessFor(Load);

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Load, VectorMap, ScalarMaps);

  Value *NewLoad;
  if (Access.isStrideZero(isl::manage_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0], NewAccesses);
  else if (Access.isStrideOne(isl::manage_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses);
  else if (Access.isStrideX(isl::manage_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses, true);
  else
    NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps, NewAccesses);

  VectorMap[Load] = NewLoad;
}

void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt, UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForStmt(Stmt));

  assert(isa<CastInst>(Inst) && "Cannot generate vector code for instruction");

  const CastInst *Cast = cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
}

void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt, BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForStmt(Stmt);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(
    ScopStmt &Stmt, StoreInst *Store, ValueMapT &VectorMap,
    VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  const MemoryAccess &Access = Stmt.getArrayAccessFor(Store);

  auto *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForStmt(Stmt));

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Store, VectorMap, ScalarMaps);

  if (Access.isStrideOne(isl::manage_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth());
    Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[0],
                                                 VLTS[0], NewAccesses);

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    StoreInst *NewStore = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      NewStore->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
      Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[i],
                                                   VLTS[i], NewAccesses);
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Value *Operand : Inst->operands())
    if (VectorMap.count(Operand))
      return true;
  return false;
}

bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Value *Operand : Inst->operands()) {
    ValueMapT::iterator VecOp = VectorMap.find(Operand);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If one scalar element has already been extracted, the code here has
      // extracted all of them, so there is no need to check each lane
      // individually.
      if (SM.count(Operand))
        break;

      SM[Operand] =
          Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(
    ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
    VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  int VectorWidth = getVectorWidth();

  bool HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < VectorWidth; VectorLane++)
    BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
                                    VLTS[VectorLane], NewAccesses);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

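// There is one lane-local LoopToScevMap per vector lane, so the vector width
// equals the number of these maps.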
1297 int VectorBlockGenerator::getVectorWidth() { return VLTS.size(); }
1298 
1299 void VectorBlockGenerator::copyInstruction(
1300     ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
1301     VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1302   // Terminator instructions control the control flow. They are explicitly
1303   // expressed in the clast and do not need to be copied.
1304   if (Inst->isTerminator())
1305     return;
1306 
1307   if (canSyntheziseInStmt(Stmt, Inst))
1308     return;
1309 
1310   if (auto *Load = dyn_cast<LoadInst>(Inst)) {
1311     generateLoad(Stmt, Load, VectorMap, ScalarMaps, NewAccesses);
1312     return;
1313   }
1314 
1315   if (hasVectorOperands(Inst, VectorMap)) {
1316     if (auto *Store = dyn_cast<StoreInst>(Inst)) {
1317       // Identified as redundant by -polly-simplify.
1318       if (!Stmt.getArrayAccessOrNULLFor(Store))
1319         return;
1320 
1321       copyStore(Stmt, Store, VectorMap, ScalarMaps, NewAccesses);
1322       return;
1323     }
1324 
1325     if (auto *Unary = dyn_cast<UnaryInstruction>(Inst)) {
1326       copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
1327       return;
1328     }
1329 
1330     if (auto *Binary = dyn_cast<BinaryOperator>(Inst)) {
1331       copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
1332       return;
1333     }
1334 
1335     // Fallthrough: We generate scalar instructions, if we don't know how to
1336     // generate vector code.
1337   }
1338 
1339   copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps, NewAccesses);
1340 }
1341 
void VectorBlockGenerator::generateScalarVectorLoads(
    ScopStmt &Stmt, ValueMapT &VectorBlockMap) {
  for (MemoryAccess *MA : Stmt) {
    if (MA->isArrayKind() || MA->isWrite())
      continue;

    auto *Address = getOrCreateAlloca(*MA);
    Type *VectorPtrType = getVectorPtrTy(Address, 1);
    Value *VectorPtr = Builder.CreateBitCast(Address, VectorPtrType,
                                             Address->getName() + "_p_vec_p");
    auto *Val = Builder.CreateLoad(VectorPtr, Address->getName() + ".reload");
    Constant *SplatVector = Constant::getNullValue(
        VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

    Value *VectorVal = Builder.CreateShuffleVector(
        Val, Val, SplatVector, Address->getName() + "_p_splat");
    VectorBlockMap[MA->getAccessValue()] = VectorVal;
  }
}

void VectorBlockGenerator::verifyNoScalarStores(ScopStmt &Stmt) {
  for (MemoryAccess *MA : Stmt) {
    if (MA->isArrayKind() || MA->isRead())
      continue;

    llvm_unreachable("Scalar stores not expected in vector loop");
  }
}

void VectorBlockGenerator::copyStmt(
    ScopStmt &Stmt, __isl_keep isl_id_to_ast_expr *NewAccesses) {
  assert(Stmt.isBlockStmt() &&
         "TODO: Only block statements can be copied by the vector block "
         "generator");

  BasicBlock *BB = Stmt.getBasicBlock();
  BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(),
                                  &*Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(&CopyBB->front());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported, there is one map for scalar
  // values and one for vector values.
  //
  // In case we only generate scalar code, the vector map is not used and the
  // scalar map has just one dimension, which contains the mapping.
  //
  // In case we generate vector code, an instruction may either appear once in
  // the vector map (as it computes <vector width> values at a time) or, if
  // its values are computed using scalar operations, once in every dimension
  // of the scalar map.
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  generateScalarVectorLoads(Stmt, VectorBlockMap);

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap, NewAccesses);

  verifyNoScalarStores(Stmt);
}

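// Reconnect dominance in the copied CFG: make the copy of BB's immediate
// dominator the immediate dominator of BBCopy, if that dominator has already
// been copied. Returns the start-block copy of BB's immediate dominator, or
// nullptr if it has not been copied yet.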
BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
                                             BasicBlock *BBCopy) {
  BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
  BasicBlock *BBCopyIDom = EndBlockMap.lookup(BBIDom);

  if (BBCopyIDom)
    DT.changeImmediateDominator(BBCopy, BBCopyIDom);

  return StartBlockMap.lookup(BBIDom);
}

// This is to determine whether an llvm::Value (defined in @p BB) is usable
// when leaving a subregion. The straightforward
// DT.dominates(BB, R->getExitBlock()) does not work in cases where the exit
// block has edges from outside the region. In that case the llvm::Value would
// never be usable in the exit block. The RegionGenerator, however, creates a
// new exit block ('ExitBBCopy') for the subregion's exiting edges only. We
// need to determine whether an llvm::Value is usable in there. We do this by
// checking whether it dominates all exiting blocks individually.
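//
// For example, if the exit block E has region-internal predecessors A and B
// plus an edge from a block X outside the region, a value defined in BB is
// usable in the copied exit block iff BB dominates both A and B; whether BB
// dominates E itself is irrelevant because of the edge X -> E.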
static bool isDominatingSubregionExit(const DominatorTree &DT, Region *R,
                                      BasicBlock *BB) {
  for (auto ExitingBB : predecessors(R->getExit())) {
    // Check for non-subregion incoming edges.
    if (!R->contains(ExitingBB))
      continue;

    if (!DT.dominates(BB, ExitingBB))
      return false;
  }

  return true;
}

// Find the block that would immediately dominate the subregion's exit block
// if the subregion had been simplified, i.e. the nearest common dominator of
// all exiting blocks.
static BasicBlock *findExitDominator(DominatorTree &DT, Region *R) {
  BasicBlock *Common = nullptr;
  for (auto ExitingBB : predecessors(R->getExit())) {
    // Check for non-subregion incoming edges.
    if (!R->contains(ExitingBB))
      continue;

    // First exiting edge.
    if (!Common) {
      Common = ExitingBB;
      continue;
    }

    Common = DT.findNearestCommonDominator(Common, ExitingBB);
  }

  assert(Common && R->contains(Common));
  return Common;
}

void RegionGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
                               isl_id_to_ast_expr *IdToAstExp) {
  assert(Stmt.isRegionStmt() &&
         "Only region statements can be copied by the region generator");

  // Forget all old mappings.
  StartBlockMap.clear();
  EndBlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();

  // Collection of all values related to this subregion.
  ValueMapT ValueMap;

  // The region represented by the statement.
  Region *R = Stmt.getRegion();

  // Create a dedicated entry for the region where we can reload all demoted
  // inputs.
  BasicBlock *EntryBB = R->getEntry();
  BasicBlock *EntryBBCopy = SplitBlock(Builder.GetInsertBlock(),
                                       &*Builder.GetInsertPoint(), &DT, &LI);
  EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
  Builder.SetInsertPoint(&EntryBBCopy->front());

  ValueMapT &EntryBBMap = RegionMaps[EntryBBCopy];
  generateScalarLoads(Stmt, LTS, EntryBBMap, IdToAstExp);
  generateBeginStmtTrace(Stmt, LTS, EntryBBMap);

  for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
    if (!R->contains(*PI)) {
      StartBlockMap[*PI] = EntryBBCopy;
      EndBlockMap[*PI] = EntryBBCopy;
    }

  // Iterate over all blocks in the region in a breadth-first search.
  std::deque<BasicBlock *> Blocks;
  SmallSetVector<BasicBlock *, 8> SeenBlocks;
  Blocks.push_back(EntryBB);
  SeenBlocks.insert(EntryBB);

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.front();
    Blocks.pop_front();

    // First split the block and update dominance information.
    BasicBlock *BBCopy = splitBB(BB);
    BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);

    // Get the mapping for this block and initialize it with either the scalar
    // loads from the generated entering block (which dominates all blocks of
    // this subregion) or the maps of the immediate dominator, if part of the
    // subregion. The latter necessarily includes the former.
    ValueMapT *InitBBMap;
    if (BBCopyIDom) {
      assert(RegionMaps.count(BBCopyIDom));
      InitBBMap = &RegionMaps[BBCopyIDom];
    } else
      InitBBMap = &EntryBBMap;
    auto Inserted = RegionMaps.insert(std::make_pair(BBCopy, *InitBBMap));
    ValueMapT &RegionMap = Inserted.first->second;

    // Copy the block with the BlockGenerator.
    Builder.SetInsertPoint(&BBCopy->front());
    copyBB(Stmt, BB, BBCopy, RegionMap, LTS, IdToAstExp);

    // To be able to remap PHI nodes, we also store basic block mappings.
    StartBlockMap[BB] = BBCopy;
    EndBlockMap[BB] = Builder.GetInsertBlock();

    // Add values to incomplete PHI nodes waiting for this block to be copied.
    for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
      addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB, LTS);
    IncompletePHINodeMap[BB].clear();

    // And continue with new successors inside the region.
    for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
      if (R->contains(*SI) && SeenBlocks.insert(*SI))
        Blocks.push_back(*SI);

    // Remember the values in case they are visible after this subregion.
    if (isDominatingSubregionExit(DT, R, BB))
      ValueMap.insert(RegionMap.begin(), RegionMap.end());
  }

  // Now create a new dedicated region exit block and add it to the region map.
  BasicBlock *ExitBBCopy = SplitBlock(Builder.GetInsertBlock(),
                                      &*Builder.GetInsertPoint(), &DT, &LI);
  ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
  StartBlockMap[R->getExit()] = ExitBBCopy;
  EndBlockMap[R->getExit()] = ExitBBCopy;

  BasicBlock *ExitDomBBCopy = EndBlockMap.lookup(findExitDominator(DT, R));
  assert(ExitDomBBCopy &&
         "Common exit dominator must be within region; at least the entry node "
         "must match");
  DT.changeImmediateDominator(ExitBBCopy, ExitDomBBCopy);

  // As the block generator does not handle control flow, we need to add the
  // region's control flow by hand after all blocks have been copied.
  for (BasicBlock *BB : SeenBlocks) {
    BasicBlock *BBCopyStart = StartBlockMap[BB];
    BasicBlock *BBCopyEnd = EndBlockMap[BB];
    Instruction *TI = BB->getTerminator();
    if (isa<UnreachableInst>(TI)) {
      while (!BBCopyEnd->empty())
        BBCopyEnd->begin()->eraseFromParent();
      new UnreachableInst(BBCopyEnd->getContext(), BBCopyEnd);
      continue;
    }

    Instruction *BICopy = BBCopyEnd->getTerminator();

    ValueMapT &RegionMap = RegionMaps[BBCopyStart];
    RegionMap.insert(StartBlockMap.begin(), StartBlockMap.end());

    Builder.SetInsertPoint(BICopy);
    copyInstScalar(Stmt, TI, RegionMap, LTS);
    BICopy->eraseFromParent();
  }

  // Add counting PHI nodes to all loops in the region that can be used as
  // replacements for SCEVs referring to the old loop.
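  //
  // Schematically, in each copied loop header this creates (block names are
  // illustrative):
  //   %polly.subregion.iv     = phi i32 [ 0, %entering ],
  //                                     [ %polly.subregion.iv.inc, %latch ]
  //   %polly.subregion.iv.inc = add i32 %polly.subregion.iv, 1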
  for (BasicBlock *BB : SeenBlocks) {
    Loop *L = LI.getLoopFor(BB);
    if (L == nullptr || L->getHeader() != BB || !R->contains(L))
      continue;

    BasicBlock *BBCopy = StartBlockMap[BB];
    Value *NullVal = Builder.getInt32(0);
    PHINode *LoopPHI =
        PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
    Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
        LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
    LoopPHI->insertBefore(&BBCopy->front());
    LoopPHIInc->insertBefore(BBCopy->getTerminator());

    for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
      if (!R->contains(PredBB))
        continue;
      if (L->contains(PredBB))
        LoopPHI->addIncoming(LoopPHIInc, EndBlockMap[PredBB]);
      else
        LoopPHI->addIncoming(NullVal, EndBlockMap[PredBB]);
    }

    for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
      if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
        LoopPHI->addIncoming(NullVal, PredBBCopy);

    LTS[L] = SE.getUnknown(LoopPHI);
  }

  // Continue generating code in the exit block.
  Builder.SetInsertPoint(&*ExitBBCopy->getFirstInsertionPt());

  // Write values visible to other statements.
  generateScalarStores(Stmt, LTS, ValueMap, IdToAstExp);
  StartBlockMap.clear();
  EndBlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();
}

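// Create a PHI node in the subregion's (possibly moved) exit block that
// merges the copied incoming values of an exit-node PHI, adding one incoming
// value per copied exiting edge.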
PHINode *RegionGenerator::buildExitPHI(MemoryAccess *MA, LoopToScevMapT &LTS,
                                       ValueMapT &BBMap, Loop *L) {
  ScopStmt *Stmt = MA->getStatement();
  Region *SubR = Stmt->getRegion();
  auto Incoming = MA->getIncoming();

  PollyIRBuilder::InsertPointGuard IPGuard(Builder);
  PHINode *OrigPHI = cast<PHINode>(MA->getAccessInstruction());
  BasicBlock *NewSubregionExit = Builder.GetInsertBlock();

  // This can happen if the subregion is simplified after the ScopStmts
  // have been created; simplification happens as part of CodeGeneration.
  if (OrigPHI->getParent() != SubR->getExit()) {
    BasicBlock *FormerExit = SubR->getExitingBlock();
    if (FormerExit)
      NewSubregionExit = StartBlockMap.lookup(FormerExit);
  }

  PHINode *NewPHI = PHINode::Create(OrigPHI->getType(), Incoming.size(),
                                    "polly." + OrigPHI->getName(),
                                    NewSubregionExit->getFirstNonPHI());

  // Add the incoming values to the PHI.
  for (auto &Pair : Incoming) {
    BasicBlock *OrigIncomingBlock = Pair.first;
    BasicBlock *NewIncomingBlockStart = StartBlockMap.lookup(OrigIncomingBlock);
    BasicBlock *NewIncomingBlockEnd = EndBlockMap.lookup(OrigIncomingBlock);
    Builder.SetInsertPoint(NewIncomingBlockEnd->getTerminator());
    assert(RegionMaps.count(NewIncomingBlockStart));
    assert(RegionMaps.count(NewIncomingBlockEnd));
    ValueMapT *LocalBBMap = &RegionMaps[NewIncomingBlockStart];

    Value *OrigIncomingValue = Pair.second;
    Value *NewIncomingValue =
        getNewValue(*Stmt, OrigIncomingValue, *LocalBBMap, LTS, L);
    NewPHI->addIncoming(NewIncomingValue, NewIncomingBlockEnd);
  }

  return NewPHI;
}

Value *RegionGenerator::getExitScalar(MemoryAccess *MA, LoopToScevMapT &LTS,
                                      ValueMapT &BBMap) {
  ScopStmt *Stmt = MA->getStatement();

  // TODO: Add some test cases that ensure this is really the right choice.
  Loop *L = LI.getLoopFor(Stmt->getRegion()->getExit());

  if (MA->isAnyPHIKind()) {
    auto Incoming = MA->getIncoming();
    assert(!Incoming.empty() &&
           "PHI WRITEs must originate from at least one incoming block");

    // If there is only one incoming value, we do not need to create a PHI.
    if (Incoming.size() == 1) {
      Value *OldVal = Incoming[0].second;
      return getNewValue(*Stmt, OldVal, BBMap, LTS, L);
    }

    return buildExitPHI(MA, LTS, BBMap, L);
  }

  // MemoryKind::Value accesses leaving the subregion must dominate the exit
  // block; just pass the copied value.
  Value *OldVal = MA->getAccessValue();
  return getNewValue(*Stmt, OldVal, BBMap, LTS, L);
}

void RegionGenerator::generateScalarStores(
    ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
    __isl_keep isl_id_to_ast_expr *NewAccesses) {
  assert(Stmt.getRegion() &&
         "Block statements need to use the generateScalarStores() "
         "function in the BlockGenerator");

  for (MemoryAccess *MA : Stmt) {
    if (MA->isOriginalArrayKind() || MA->isRead())
      continue;

    isl::set AccDom = MA->getAccessRelation().domain();
    std::string Subject = MA->getId().get_name();
    generateConditionalExecution(
        Stmt, AccDom, Subject.c_str(), [&, this, MA]() {
          Value *NewVal = getExitScalar(MA, LTS, BBMap);
          Value *Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS,
                                              BBMap, NewAccesses);
          assert((!isa<Instruction>(NewVal) ||
                  DT.dominates(cast<Instruction>(NewVal)->getParent(),
                               Builder.GetInsertBlock())) &&
                 "Domination violation");
          assert((!isa<Instruction>(Address) ||
                  DT.dominates(cast<Instruction>(Address)->getParent(),
                               Builder.GetInsertBlock())) &&
                 "Domination violation");
          Builder.CreateStore(NewVal, Address);
        });
  }
}

void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, PHINode *PHI,
                                      PHINode *PHICopy, BasicBlock *IncomingBB,
                                      LoopToScevMapT &LTS) {
  // If the incoming block was not yet copied, mark this PHI as incomplete.
  // Once the block is copied, the incoming value will be added.
  BasicBlock *BBCopyStart = StartBlockMap[IncomingBB];
  BasicBlock *BBCopyEnd = EndBlockMap[IncomingBB];
  if (!BBCopyStart) {
    assert(!BBCopyEnd);
    assert(Stmt.represents(IncomingBB) &&
           "Bad incoming block for PHI in non-affine region");
    IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
    return;
  }

  assert(RegionMaps.count(BBCopyStart) &&
         "Incoming PHI block did not have a BBMap");
  ValueMapT &BBCopyMap = RegionMaps[BBCopyStart];

  Value *OpCopy = nullptr;

  if (Stmt.represents(IncomingBB)) {
    Value *Op = PHI->getIncomingValueForBlock(IncomingBB);

    // If the current insert block differs from the PHI's incoming block,
    // temporarily switch to the latter; otherwise leave it unchanged.
    auto IP = Builder.GetInsertPoint();
    if (IP->getParent() != BBCopyEnd)
      Builder.SetInsertPoint(BBCopyEnd->getTerminator());
    OpCopy = getNewValue(Stmt, Op, BBCopyMap, LTS, getLoopForStmt(Stmt));
    if (IP->getParent() != BBCopyEnd)
      Builder.SetInsertPoint(&*IP);
  } else {
    // All edges from outside the non-affine region become a single edge
    // in the new copy of the non-affine region. Make sure to only add the
    // corresponding edge the first time we encounter a basic block from
    // outside the non-affine region.
    if (PHICopy->getBasicBlockIndex(BBCopyEnd) >= 0)
      return;

    // Get the reloaded value.
    OpCopy = getNewValue(Stmt, PHI, BBCopyMap, LTS, getLoopForStmt(Stmt));
  }

  assert(OpCopy && "Incoming PHI value was not copied properly");
  PHICopy->addIncoming(OpCopy, BBCopyEnd);
}

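// Create the copy of a PHI node up front with an empty incoming list; the
// incoming values are filled in by addOperandToPHI, either immediately or
// once the corresponding incoming block has been copied.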
void RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI,
                                         ValueMapT &BBMap,
                                         LoopToScevMapT &LTS) {
  unsigned NumIncoming = PHI->getNumIncomingValues();
  PHINode *PHICopy =
      Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
  PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
  BBMap[PHI] = PHICopy;

  for (BasicBlock *IncomingBB : PHI->blocks())
    addOperandToPHI(Stmt, PHI, PHICopy, IncomingBB, LTS);
}