1 //===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Take a scop created by ScopInfo and map it to GPU code using the ppcg
10 // GPU mapping strategy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "polly/CodeGen/PPCGCodeGeneration.h"
15 #include "polly/CodeGen/CodeGeneration.h"
16 #include "polly/CodeGen/IslAst.h"
17 #include "polly/CodeGen/IslNodeBuilder.h"
18 #include "polly/CodeGen/PerfMonitor.h"
19 #include "polly/CodeGen/Utils.h"
20 #include "polly/DependenceInfo.h"
21 #include "polly/LinkAllPasses.h"
22 #include "polly/Options.h"
23 #include "polly/ScopDetection.h"
24 #include "polly/ScopInfo.h"
25 #include "polly/Support/ISLTools.h"
26 #include "polly/Support/SCEVValidator.h"
27 #include "llvm/ADT/PostOrderIterator.h"
28 #include "llvm/Analysis/TargetTransformInfo.h"
29 #include "llvm/IR/IntrinsicsNVPTX.h"
30 #include "llvm/IR/LegacyPassManager.h"
31 #include "llvm/IR/Verifier.h"
32 #include "llvm/IRReader/IRReader.h"
33 #include "llvm/InitializePasses.h"
34 #include "llvm/Linker/Linker.h"
35 #include "llvm/MC/TargetRegistry.h"
36 #include "llvm/Support/SourceMgr.h"
37 #include "llvm/Target/TargetMachine.h"
38 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
39 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
40 #include "isl/union_map.h"
41 #include <algorithm>
42 
43 extern "C" {
44 #include "ppcg/cuda.h"
45 #include "ppcg/gpu.h"
46 #include "ppcg/ppcg.h"
47 }
48 
49 #include "llvm/Support/Debug.h"
50 
51 using namespace polly;
52 using namespace llvm;
53 
54 #define DEBUG_TYPE "polly-codegen-ppcg"
55 
56 static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
57                                   cl::desc("Dump the computed GPU Schedule"),
58                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
59                                   cl::cat(PollyCategory));
60 
61 static cl::opt<bool>
62     DumpCode("polly-acc-dump-code",
63              cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
64              cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
65 
66 static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
67                                   cl::desc("Dump the kernel LLVM-IR"),
68                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
69                                   cl::cat(PollyCategory));
70 
71 static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
72                                    cl::desc("Dump the kernel assembly code"),
73                                    cl::Hidden, cl::init(false), cl::ZeroOrMore,
74                                    cl::cat(PollyCategory));
75 
76 static cl::opt<bool> FastMath("polly-acc-fastmath",
77                               cl::desc("Allow unsafe math optimizations"),
78                               cl::Hidden, cl::init(false), cl::ZeroOrMore,
79                               cl::cat(PollyCategory));
80 static cl::opt<bool> SharedMemory("polly-acc-use-shared",
81                                   cl::desc("Use shared memory"), cl::Hidden,
82                                   cl::init(false), cl::ZeroOrMore,
83                                   cl::cat(PollyCategory));
84 static cl::opt<bool> PrivateMemory("polly-acc-use-private",
85                                    cl::desc("Use private memory"), cl::Hidden,
86                                    cl::init(false), cl::ZeroOrMore,
87                                    cl::cat(PollyCategory));
88 
89 bool polly::PollyManagedMemory;
90 static cl::opt<bool, true>
91     XManagedMemory("polly-acc-codegen-managed-memory",
92                    cl::desc("Generate Host kernel code assuming"
93                             " that all memory has been"
94                             " declared as managed memory"),
95                    cl::location(PollyManagedMemory), cl::Hidden,
96                    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
97 
98 static cl::opt<bool>
99     FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
100                               cl::desc("Fail and generate a backtrace if"
101                                        " verifyModule fails on the GPU "
102                                        " kernel module."),
103                               cl::Hidden, cl::init(false), cl::ZeroOrMore,
104                               cl::cat(PollyCategory));
105 
106 static cl::opt<std::string> CUDALibDevice(
107     "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
108     cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
109     cl::ZeroOrMore, cl::cat(PollyCategory));
110 
111 static cl::opt<std::string>
112     CudaVersion("polly-acc-cuda-version",
113                 cl::desc("The CUDA version to compile for"), cl::Hidden,
114                 cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));
115 
116 static cl::opt<int>
117     MinCompute("polly-acc-mincompute",
118                cl::desc("Minimal number of compute statements to run on GPU."),
119                cl::Hidden, cl::init(10 * 512 * 512));
120 
121 GPURuntime polly::GPURuntimeChoice;
122 static cl::opt<GPURuntime, true> XGPURuntimeChoice(
123     "polly-gpu-runtime", cl::desc("The GPU Runtime API to target"),
124     cl::values(clEnumValN(GPURuntime::CUDA, "libcudart",
125                           "use the CUDA Runtime API"),
126                clEnumValN(GPURuntime::OpenCL, "libopencl",
127                           "use the OpenCL Runtime API")),
128     cl::location(polly::GPURuntimeChoice), cl::init(GPURuntime::CUDA),
129     cl::ZeroOrMore, cl::cat(PollyCategory));
130 
131 GPUArch polly::GPUArchChoice;
132 static cl::opt<GPUArch, true>
133     XGPUArchChoice("polly-gpu-arch", cl::desc("The GPU Architecture to target"),
134                    cl::values(clEnumValN(GPUArch::NVPTX64, "nvptx64",
135                                          "target NVIDIA 64-bit architecture"),
136                               clEnumValN(GPUArch::SPIR32, "spir32",
137                                          "target SPIR 32-bit architecture"),
138                               clEnumValN(GPUArch::SPIR64, "spir64",
139                                          "target SPIR 64-bit architecture")),
140                    cl::location(polly::GPUArchChoice),
141                    cl::init(GPUArch::NVPTX64), cl::ZeroOrMore,
142                    cl::cat(PollyCategory));
143 
144 extern bool polly::PerfMonitoring;
145 
146 /// Return  a unique name for a Scop, which is the scop region with the
147 /// function name.
148 std::string getUniqueScopName(const Scop *S) {
149   return "Scop Region: " + S->getNameStr() +
150          " | Function: " + std::string(S->getFunction().getName());
151 }
152 
153 /// Used to store information PPCG wants for kills. This information is
154 /// used by live range reordering.
155 ///
156 /// @see computeLiveRangeReordering
157 /// @see GPUNodeBuilder::createPPCGScop
158 /// @see GPUNodeBuilder::createPPCGProg
159 struct MustKillsInfo {
160   /// Collection of all kill statements that will be sequenced at the end of
161   /// PPCGScop->schedule.
162   ///
163   /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`
164   /// which merges schedules in *arbitrary* order.
165   /// (we don't care about the order of the kills anyway).
166   isl::schedule KillsSchedule;
167   /// Map from kill statement instances to scalars that need to be
168   /// killed.
169   ///
170   /// We currently derive kill information for:
171   ///  1. phi nodes. PHI nodes are not alive outside the scop and can
172   ///     consequently all be killed.
173   ///  2. Scalar arrays that are not used outside the Scop. This is
174   ///     checked by `isScalarUsesContainedInScop`.
175   /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
176   isl::union_map TaggedMustKills;
177 
178   /// Tagged must kills stripped of the tags.
179   /// [params] -> { Stmt_phantom[]  -> scalar_to_kill[] }
180   isl::union_map MustKills;
181 
182   MustKillsInfo() : KillsSchedule() {}
183 };
184 
185 /// Check if SAI's uses are entirely contained within Scop S.
186 /// If a scalar is used only with a Scop, we are free to kill it, as no data
187 /// can flow in/out of the value any more.
188 /// @see computeMustKillsInfo
189 static bool isScalarUsesContainedInScop(const Scop &S,
190                                         const ScopArrayInfo *SAI) {
191   assert(SAI->isValueKind() && "this function only deals with scalars."
192                                " Dealing with arrays required alias analysis");
193 
194   const Region &R = S.getRegion();
195   for (User *U : SAI->getBasePtr()->users()) {
196     Instruction *I = dyn_cast<Instruction>(U);
197     assert(I && "invalid user of scop array info");
198     if (!R.contains(I))
199       return false;
200   }
201   return true;
202 }
203 
204 /// Compute must-kills needed to enable live range reordering with PPCG.
205 ///
206 /// @params S The Scop to compute live range reordering information
207 /// @returns live range reordering information that can be used to setup
208 /// PPCG.
209 static MustKillsInfo computeMustKillsInfo(const Scop &S) {
210   const isl::space ParamSpace = S.getParamSpace();
211   MustKillsInfo Info;
212 
213   // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
214   //      1.1 phi nodes in scop.
215   //      1.2 scalars that are only used within the scop
216   SmallVector<isl::id, 4> KillMemIds;
217   for (ScopArrayInfo *SAI : S.arrays()) {
218     if (SAI->isPHIKind() ||
219         (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
220       KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
221   }
222 
223   Info.TaggedMustKills = isl::union_map::empty(ParamSpace.ctx());
224   Info.MustKills = isl::union_map::empty(ParamSpace.ctx());
225 
226   // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
227   // schedule:
228   //     - filter: "[control] -> { }"
229   // So, we choose to not create this to keep the output a little nicer,
230   // at the cost of some code complexity.
231   Info.KillsSchedule = {};
232 
233   for (isl::id &ToKillId : KillMemIds) {
234     isl::id KillStmtId = isl::id::alloc(
235         S.getIslCtx(),
236         std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);
237 
238     // NOTE: construction of tagged_must_kill:
239     // 2. We need to construct a map:
240     //     [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
241     // To construct this, we use `isl_map_domain_product` on 2 maps`:
242     // 2a. StmtToScalar:
243     //         [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
244     // 2b. PhantomRefToScalar:
245     //         [param] -> { ref_phantom[] -> scalar_to_kill[] }
246     //
247     // Combining these with `isl_map_domain_product` gives us
248     // TaggedMustKill:
249     //     [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
250 
251     // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
252     isl::map StmtToScalar = isl::map::universe(ParamSpace);
253     StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
254     StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));
255 
256     isl::id PhantomRefId = isl::id::alloc(
257         S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
258         nullptr);
259 
260     // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
261     isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
262     PhantomRefToScalar =
263         PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
264     PhantomRefToScalar =
265         PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);
266 
267     // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
268     isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
269     Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);
270 
271     // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
272     Info.MustKills = Info.TaggedMustKills.domain_factor_domain();
273 
274     // 3. Create the kill schedule of the form:
275     //     "[param] -> { Stmt_phantom[] }"
276     // Then add this to Info.KillsSchedule.
277     isl::space KillStmtSpace = ParamSpace;
278     KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
279     isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);
280 
281     isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
282     if (!Info.KillsSchedule.is_null())
283       Info.KillsSchedule = isl::manage(
284           isl_schedule_set(Info.KillsSchedule.release(), KillSchedule.copy()));
285     else
286       Info.KillsSchedule = KillSchedule;
287   }
288 
289   return Info;
290 }
291 
292 /// Create the ast expressions for a ScopStmt.
293 ///
294 /// This function is a callback for to generate the ast expressions for each
295 /// of the scheduled ScopStmts.
296 static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
297     void *StmtT, __isl_take isl_ast_build *Build_C,
298     isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
299                                        isl_id *Id, void *User),
300     void *UserIndex,
301     isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
302     void *UserExpr) {
303 
304   ScopStmt *Stmt = (ScopStmt *)StmtT;
305 
306   if (!Stmt || !Build_C)
307     return NULL;
308 
309   isl::ast_build Build = isl::manage_copy(Build_C);
310   isl::ctx Ctx = Build.ctx();
311   isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);
312 
313   Stmt->setAstBuild(Build);
314 
315   for (MemoryAccess *Acc : *Stmt) {
316     isl::map AddrFunc = Acc->getAddressFunction();
317     AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());
318 
319     isl::id RefId = Acc->getId();
320     isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);
321 
322     isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
323     MPA = MPA.coalesce();
324     MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));
325 
326     isl::ast_expr Access = Build.access_from(MPA);
327     Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
328     RefToExpr = RefToExpr.set(RefId, Access);
329   }
330 
331   return RefToExpr.release();
332 }
333 
334 /// Given a LLVM Type, compute its size in bytes,
335 static int computeSizeInBytes(const Type *T) {
336   int bytes = T->getPrimitiveSizeInBits() / 8;
337   if (bytes == 0)
338     bytes = T->getScalarSizeInBits() / 8;
339   return bytes;
340 }
341 
342 /// Generate code for a GPU specific isl AST.
343 ///
344 /// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
345 /// generates code for general-purpose AST nodes, with special functionality
346 /// for generating GPU specific user nodes.
347 ///
348 /// @see GPUNodeBuilder::createUser
349 class GPUNodeBuilder : public IslNodeBuilder {
350 public:
351   GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
352                  const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
353                  DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
354                  gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
355       : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
356         Prog(Prog), Runtime(Runtime), Arch(Arch) {
357     getExprBuilder().setIDToSAI(&IDToSAI);
358   }
359 
360   /// Create after-run-time-check initialization code.
361   void initializeAfterRTH();
362 
363   /// Finalize the generated scop.
364   void finalize() override;
365 
366   /// Track if the full build process was successful.
367   ///
368   /// This value is set to false, if throughout the build process an error
369   /// occurred which prevents us from generating valid GPU code.
370   bool BuildSuccessful = true;
371 
372   /// The maximal number of loops surrounding a sequential kernel.
373   unsigned DeepestSequential = 0;
374 
375   /// The maximal number of loops surrounding a parallel kernel.
376   unsigned DeepestParallel = 0;
377 
378   /// Return the name to set for the ptx_kernel.
379   std::string getKernelFuncName(int Kernel_id);
380 
381 private:
382   /// A vector of array base pointers for which a new ScopArrayInfo was created.
383   ///
384   /// This vector is used to delete the ScopArrayInfo when it is not needed any
385   /// more.
386   std::vector<Value *> LocalArrays;
387 
388   /// A map from ScopArrays to their corresponding device allocations.
389   std::map<ScopArrayInfo *, Value *> DeviceAllocations;
390 
391   /// The current GPU context.
392   Value *GPUContext;
393 
394   /// The set of isl_ids allocated in the kernel
395   std::vector<isl_id *> KernelIds;
396 
397   /// A module containing GPU code.
398   ///
399   /// This pointer is only set in case we are currently generating GPU code.
400   std::unique_ptr<Module> GPUModule;
401 
402   /// The GPU program we generate code for.
403   gpu_prog *Prog;
404 
405   /// The GPU Runtime implementation to use (OpenCL or CUDA).
406   GPURuntime Runtime;
407 
408   /// The GPU Architecture to target.
409   GPUArch Arch;
410 
411   /// Class to free isl_ids.
412   class IslIdDeleter {
413   public:
414     void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
415   };
416 
417   /// A set containing all isl_ids allocated in a GPU kernel.
418   ///
419   /// By releasing this set all isl_ids will be freed.
420   std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;
421 
422   IslExprBuilder::IDToScopArrayInfoTy IDToSAI;
423 
424   /// Create code for user-defined AST nodes.
425   ///
426   /// These AST nodes can be of type:
427   ///
428   ///   - ScopStmt:      A computational statement (TODO)
429   ///   - Kernel:        A GPU kernel call (TODO)
430   ///   - Data-Transfer: A GPU <-> CPU data-transfer
431   ///   - In-kernel synchronization
432   ///   - In-kernel memory copy statement
433   ///
434   /// @param UserStmt The ast node to generate code for.
435   void createUser(__isl_take isl_ast_node *UserStmt) override;
436 
437   void createFor(__isl_take isl_ast_node *Node) override;
438 
439   enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };
440 
441   /// Create code for a data transfer statement
442   ///
443   /// @param TransferStmt The data transfer statement.
444   /// @param Direction The direction in which to transfer data.
445   void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
446                           enum DataDirection Direction);
447 
448   /// Find llvm::Values referenced in GPU kernel.
449   ///
450   /// @param Kernel The kernel to scan for llvm::Values
451   ///
452   /// @returns A tuple, whose:
453   ///          - First element contains the set of values referenced by the
454   ///            kernel
455   ///          - Second element contains the set of functions referenced by the
456   ///             kernel. All functions in the set satisfy
457   ///             `isValidFunctionInKernel`.
458   ///          - Third element contains loops that have induction variables
459   ///            which are used in the kernel, *and* these loops are *neither*
460   ///            in the scop, nor do they immediately surroung the Scop.
461   ///            See [Code generation of induction variables of loops outside
462   ///            Scops]
463   std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
464              isl::space>
465   getReferencesInKernel(ppcg_kernel *Kernel);
466 
467   /// Compute the sizes of the execution grid for a given kernel.
468   ///
469   /// @param Kernel The kernel to compute grid sizes for.
470   ///
471   /// @returns A tuple with grid sizes for X and Y dimension
472   std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);
473 
474   /// Get the managed array pointer for sending host pointers to the device.
475   /// \note
476   /// This is to be used only with managed memory
477   Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);
478 
479   /// Compute the sizes of the thread blocks for a given kernel.
480   ///
481   /// @param Kernel The kernel to compute thread block sizes for.
482   ///
483   /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
484   std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);
485 
486   /// Store a specific kernel launch parameter in the array of kernel launch
487   /// parameters.
488   ///
489   /// @param ArrayTy    Array type of \p Parameters.
490   /// @param Parameters The list of parameters in which to store.
491   /// @param Param      The kernel launch parameter to store.
492   /// @param Index      The index in the parameter list, at which to store the
493   ///                   parameter.
494   void insertStoreParameter(Type *ArrayTy, Instruction *Parameters,
495                             Instruction *Param, int Index);
496 
497   /// Create kernel launch parameters.
498   ///
499   /// @param Kernel        The kernel to create parameters for.
500   /// @param F             The kernel function that has been created.
501   /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
502   ///
503   /// @returns A stack allocated array with pointers to the parameter
504   ///          values that are passed to the kernel.
505   Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
506                                 SetVector<Value *> SubtreeValues);
507 
508   /// Create declarations for kernel variable.
509   ///
510   /// This includes shared memory declarations.
511   ///
512   /// @param Kernel        The kernel definition to create variables for.
513   /// @param FN            The function into which to generate the variables.
514   void createKernelVariables(ppcg_kernel *Kernel, Function *FN);
515 
516   /// Add CUDA annotations to module.
517   ///
518   /// Add a set of CUDA annotations that declares the maximal block dimensions
519   /// that will be used to execute the CUDA kernel. This allows the NVIDIA
520   /// PTX compiler to bound the number of allocated registers to ensure the
521   /// resulting kernel is known to run with up to as many block dimensions
522   /// as specified here.
523   ///
524   /// @param M         The module to add the annotations to.
525   /// @param BlockDimX The size of block dimension X.
526   /// @param BlockDimY The size of block dimension Y.
527   /// @param BlockDimZ The size of block dimension Z.
528   void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
529                           Value *BlockDimZ);
530 
531   /// Create GPU kernel.
532   ///
533   /// Code generate the kernel described by @p KernelStmt.
534   ///
535   /// @param KernelStmt The ast node to generate kernel code for.
536   void createKernel(__isl_take isl_ast_node *KernelStmt);
537 
538   /// Generate code that computes the size of an array.
539   ///
540   /// @param Array The array for which to compute a size.
541   Value *getArraySize(gpu_array_info *Array);
542 
543   /// Generate code to compute the minimal offset at which an array is accessed.
544   ///
545   /// The offset of an array is the minimal array location accessed in a scop.
546   ///
547   /// Example:
548   ///
549   ///   for (long i = 0; i < 100; i++)
550   ///     A[i + 42] += ...
551   ///
552   ///   getArrayOffset(A) results in 42.
553   ///
554   /// @param Array The array for which to compute the offset.
555   /// @returns An llvm::Value that contains the offset of the array.
556   Value *getArrayOffset(gpu_array_info *Array);
557 
558   /// Prepare the kernel arguments for kernel code generation
559   ///
560   /// @param Kernel The kernel to generate code for.
561   /// @param FN     The function created for the kernel.
562   void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);
563 
564   /// Create kernel function.
565   ///
566   /// Create a kernel function located in a newly created module that can serve
567   /// as target for device code generation. Set the Builder to point to the
568   /// start block of this newly created function.
569   ///
570   /// @param Kernel The kernel to generate code for.
571   /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
572   /// @param SubtreeFunctions The set of llvm::Functions referenced by this
573   ///                         kernel.
574   void createKernelFunction(ppcg_kernel *Kernel,
575                             SetVector<Value *> &SubtreeValues,
576                             SetVector<Function *> &SubtreeFunctions);
577 
578   /// Create the declaration of a kernel function.
579   ///
580   /// The kernel function takes as arguments:
581   ///
582   ///   - One i8 pointer for each external array reference used in the kernel.
583   ///   - Host iterators
584   ///   - Parameters
585   ///   - Other LLVM Value references (TODO)
586   ///
587   /// @param Kernel The kernel to generate the function declaration for.
588   /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
589   ///
590   /// @returns The newly declared function.
591   Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
592                                      SetVector<Value *> &SubtreeValues);
593 
594   /// Insert intrinsic functions to obtain thread and block ids.
595   ///
596   /// @param The kernel to generate the intrinsic functions for.
597   void insertKernelIntrinsics(ppcg_kernel *Kernel);
598 
599   /// Insert function calls to retrieve the SPIR group/local ids.
600   ///
601   /// @param Kernel The kernel to generate the function calls for.
602   /// @param SizeTypeIs64Bit Whether size_t of the openCl device is 64bit.
603   void insertKernelCallsSPIR(ppcg_kernel *Kernel, bool SizeTypeIs64bit);
604 
605   /// Setup the creation of functions referenced by the GPU kernel.
606   ///
607   /// 1. Create new function declarations in GPUModule which are the same as
608   /// SubtreeFunctions.
609   ///
610   /// 2. Populate IslNodeBuilder::ValueMap with mappings from
611   /// old functions (that come from the original module) to new functions
612   /// (that are created within GPUModule). That way, we generate references
613   /// to the correct function (in GPUModule) in BlockGenerator.
614   ///
615   /// @see IslNodeBuilder::ValueMap
616   /// @see BlockGenerator::GlobalMap
617   /// @see BlockGenerator::getNewValue
618   /// @see GPUNodeBuilder::getReferencesInKernel.
619   ///
620   /// @param SubtreeFunctions The set of llvm::Functions referenced by
621   ///                         this kernel.
622   void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);
623 
624   /// Create a global-to-shared or shared-to-global copy statement.
625   ///
626   /// @param CopyStmt The copy statement to generate code for
627   void createKernelCopy(ppcg_kernel_stmt *CopyStmt);
628 
629   /// Create code for a ScopStmt called in @p Expr.
630   ///
631   /// @param Expr The expression containing the call.
632   /// @param KernelStmt The kernel statement referenced in the call.
633   void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);
634 
635   /// Create an in-kernel synchronization call.
636   void createKernelSync();
637 
638   /// Create a PTX assembly string for the current GPU kernel.
639   ///
640   /// @returns A string containing the corresponding PTX assembly code.
641   std::string createKernelASM();
642 
643   /// Remove references from the dominator tree to the kernel function @p F.
644   ///
645   /// @param F The function to remove references to.
646   void clearDominators(Function *F);
647 
648   /// Remove references from scalar evolution to the kernel function @p F.
649   ///
650   /// @param F The function to remove references to.
651   void clearScalarEvolution(Function *F);
652 
653   /// Remove references from loop info to the kernel function @p F.
654   ///
655   /// @param F The function to remove references to.
656   void clearLoops(Function *F);
657 
658   /// Check if the scop requires to be linked with CUDA's libdevice.
659   bool requiresCUDALibDevice();
660 
661   /// Link with the NVIDIA libdevice library (if needed and available).
662   void addCUDALibDevice();
663 
664   /// Finalize the generation of the kernel function.
665   ///
666   /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
667   /// dump its IR to stderr.
668   ///
669   /// @returns The Assembly string of the kernel.
670   std::string finalizeKernelFunction();
671 
672   /// Finalize the generation of the kernel arguments.
673   ///
674   /// This function ensures that not-read-only scalars used in a kernel are
675   /// stored back to the global memory location they are backed with before
676   /// the kernel terminates.
677   ///
678   /// @params Kernel The kernel to finalize kernel arguments for.
679   void finalizeKernelArguments(ppcg_kernel *Kernel);
680 
681   /// Create code that allocates memory to store arrays on device.
682   void allocateDeviceArrays();
683 
684   /// Create code to prepare the managed device pointers.
685   void prepareManagedDeviceArrays();
686 
687   /// Free all allocated device arrays.
688   void freeDeviceArrays();
689 
690   /// Create a call to initialize the GPU context.
691   ///
692   /// @returns A pointer to the newly initialized context.
693   Value *createCallInitContext();
694 
695   /// Create a call to get the device pointer for a kernel allocation.
696   ///
697   /// @param Allocation The Polly GPU allocation
698   ///
699   /// @returns The device parameter corresponding to this allocation.
700   Value *createCallGetDevicePtr(Value *Allocation);
701 
702   /// Create a call to free the GPU context.
703   ///
704   /// @param Context A pointer to an initialized GPU context.
705   void createCallFreeContext(Value *Context);
706 
707   /// Create a call to allocate memory on the device.
708   ///
709   /// @param Size The size of memory to allocate
710   ///
711   /// @returns A pointer that identifies this allocation.
712   Value *createCallAllocateMemoryForDevice(Value *Size);
713 
714   /// Create a call to free a device array.
715   ///
716   /// @param Array The device array to free.
717   void createCallFreeDeviceMemory(Value *Array);
718 
719   /// Create a call to copy data from host to device.
720   ///
721   /// @param HostPtr A pointer to the host data that should be copied.
722   /// @param DevicePtr A device pointer specifying the location to copy to.
723   void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
724                                       Value *Size);
725 
726   /// Create a call to copy data from device to host.
727   ///
728   /// @param DevicePtr A pointer to the device data that should be copied.
729   /// @param HostPtr A host pointer specifying the location to copy to.
730   void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
731                                       Value *Size);
732 
733   /// Create a call to synchronize Host & Device.
734   /// \note
735   /// This is to be used only with managed memory.
736   void createCallSynchronizeDevice();
737 
738   /// Create a call to get a kernel from an assembly string.
739   ///
740   /// @param Buffer The string describing the kernel.
741   /// @param Entry  The name of the kernel function to call.
742   ///
743   /// @returns A pointer to a kernel object
744   Value *createCallGetKernel(Value *Buffer, Value *Entry);
745 
746   /// Create a call to free a GPU kernel.
747   ///
748   /// @param GPUKernel THe kernel to free.
749   void createCallFreeKernel(Value *GPUKernel);
750 
751   /// Create a call to launch a GPU kernel.
752   ///
753   /// @param GPUKernel  The kernel to launch.
754   /// @param GridDimX   The size of the first grid dimension.
755   /// @param GridDimY   The size of the second grid dimension.
756   /// @param GridBlockX The size of the first block dimension.
757   /// @param GridBlockY The size of the second block dimension.
758   /// @param GridBlockZ The size of the third block dimension.
759   /// @param Parameters A pointer to an array that contains itself pointers to
760   ///                   the parameter values passed for each kernel argument.
761   void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
762                               Value *GridDimY, Value *BlockDimX,
763                               Value *BlockDimY, Value *BlockDimZ,
764                               Value *Parameters);
765 };
766 
767 std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
768   return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
769          std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
770 }
771 
772 void GPUNodeBuilder::initializeAfterRTH() {
773   BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
774                                  &*Builder.GetInsertPoint(), &DT, &LI);
775   NewBB->setName("polly.acc.initialize");
776   Builder.SetInsertPoint(&NewBB->front());
777 
778   GPUContext = createCallInitContext();
779 
780   if (!PollyManagedMemory)
781     allocateDeviceArrays();
782   else
783     prepareManagedDeviceArrays();
784 }
785 
786 void GPUNodeBuilder::finalize() {
787   if (!PollyManagedMemory)
788     freeDeviceArrays();
789 
790   createCallFreeContext(GPUContext);
791   IslNodeBuilder::finalize();
792 }
793 
794 void GPUNodeBuilder::allocateDeviceArrays() {
795   assert(!PollyManagedMemory &&
796          "Managed memory will directly send host pointers "
797          "to the kernel. There is no need for device arrays");
798   isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());
799 
800   for (int i = 0; i < Prog->n_array; ++i) {
801     gpu_array_info *Array = &Prog->array[i];
802     auto *ScopArray = (ScopArrayInfo *)Array->user;
803     std::string DevArrayName("p_dev_array_");
804     DevArrayName.append(Array->name);
805 
806     Value *ArraySize = getArraySize(Array);
807     Value *Offset = getArrayOffset(Array);
808     if (Offset)
809       ArraySize = Builder.CreateSub(
810           ArraySize,
811           Builder.CreateMul(Offset,
812                             Builder.getInt64(ScopArray->getElemSizeInBytes())));
813     const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
814     // It makes no sense to have an array of size 0. The CUDA API will
815     // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
816     // choose to be defensive and catch this at the compile phase. It is
817     // most likely that we are doing something wrong with size computation.
818     if (SizeSCEV->isZero()) {
819       errs() << getUniqueScopName(&S)
820              << " has computed array size 0: " << *ArraySize
821              << " | for array: " << *(ScopArray->getBasePtr())
822              << ". This is illegal, exiting.\n";
823       report_fatal_error("array size was computed to be 0");
824     }
825 
826     Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
827     DevArray->setName(DevArrayName);
828     DeviceAllocations[ScopArray] = DevArray;
829   }
830 
831   isl_ast_build_free(Build);
832 }
833 
834 void GPUNodeBuilder::prepareManagedDeviceArrays() {
835   assert(PollyManagedMemory &&
836          "Device array most only be prepared in managed-memory mode");
837   for (int i = 0; i < Prog->n_array; ++i) {
838     gpu_array_info *Array = &Prog->array[i];
839     ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
840     Value *HostPtr;
841 
842     if (gpu_array_is_scalar(Array))
843       HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
844     else
845       HostPtr = ScopArray->getBasePtr();
846     HostPtr = getLatestValue(HostPtr);
847 
848     Value *Offset = getArrayOffset(Array);
849     if (Offset) {
850       HostPtr = Builder.CreatePointerCast(
851           HostPtr, ScopArray->getElementType()->getPointerTo());
852       HostPtr = Builder.CreateGEP(ScopArray->getElementType(), HostPtr, Offset);
853     }
854 
855     HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
856     DeviceAllocations[ScopArray] = HostPtr;
857   }
858 }
859 
860 void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
861                                         Value *BlockDimY, Value *BlockDimZ) {
862   auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");
863 
864   for (auto &F : *M) {
865     if (F.getCallingConv() != CallingConv::PTX_Kernel)
866       continue;
867 
868     Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};
869 
870     Metadata *Elements[] = {
871         ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
872         ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
873         ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
874         ValueAsMetadata::get(V[2]),
875     };
876     MDNode *Node = MDNode::get(M->getContext(), Elements);
877     AnnotationNode->addOperand(Node);
878   }
879 }
880 
881 void GPUNodeBuilder::freeDeviceArrays() {
882   assert(!PollyManagedMemory && "Managed memory does not use device arrays");
883   for (auto &Array : DeviceAllocations)
884     createCallFreeDeviceMemory(Array.second);
885 }
886 
887 Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
888   const char *Name = "polly_getKernel";
889   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
890   Function *F = M->getFunction(Name);
891 
892   // If F is not available, declare it.
893   if (!F) {
894     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
895     std::vector<Type *> Args;
896     Args.push_back(Builder.getInt8PtrTy());
897     Args.push_back(Builder.getInt8PtrTy());
898     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
899     F = Function::Create(Ty, Linkage, Name, M);
900   }
901 
902   return Builder.CreateCall(F, {Buffer, Entry});
903 }
904 
905 Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
906   const char *Name = "polly_getDevicePtr";
907   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
908   Function *F = M->getFunction(Name);
909 
910   // If F is not available, declare it.
911   if (!F) {
912     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
913     std::vector<Type *> Args;
914     Args.push_back(Builder.getInt8PtrTy());
915     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
916     F = Function::Create(Ty, Linkage, Name, M);
917   }
918 
919   return Builder.CreateCall(F, {Allocation});
920 }
921 
922 void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
923                                             Value *GridDimY, Value *BlockDimX,
924                                             Value *BlockDimY, Value *BlockDimZ,
925                                             Value *Parameters) {
926   const char *Name = "polly_launchKernel";
927   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
928   Function *F = M->getFunction(Name);
929 
930   // If F is not available, declare it.
931   if (!F) {
932     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
933     std::vector<Type *> Args;
934     Args.push_back(Builder.getInt8PtrTy());
935     Args.push_back(Builder.getInt32Ty());
936     Args.push_back(Builder.getInt32Ty());
937     Args.push_back(Builder.getInt32Ty());
938     Args.push_back(Builder.getInt32Ty());
939     Args.push_back(Builder.getInt32Ty());
940     Args.push_back(Builder.getInt8PtrTy());
941     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
942     F = Function::Create(Ty, Linkage, Name, M);
943   }
944 
945   Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
946                          BlockDimZ, Parameters});
947 }
948 
949 void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
950   const char *Name = "polly_freeKernel";
951   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
952   Function *F = M->getFunction(Name);
953 
954   // If F is not available, declare it.
955   if (!F) {
956     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
957     std::vector<Type *> Args;
958     Args.push_back(Builder.getInt8PtrTy());
959     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
960     F = Function::Create(Ty, Linkage, Name, M);
961   }
962 
963   Builder.CreateCall(F, {GPUKernel});
964 }
965 
966 void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
967   assert(!PollyManagedMemory &&
968          "Managed memory does not allocate or free memory "
969          "for device");
970   const char *Name = "polly_freeDeviceMemory";
971   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
972   Function *F = M->getFunction(Name);
973 
974   // If F is not available, declare it.
975   if (!F) {
976     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
977     std::vector<Type *> Args;
978     Args.push_back(Builder.getInt8PtrTy());
979     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
980     F = Function::Create(Ty, Linkage, Name, M);
981   }
982 
983   Builder.CreateCall(F, {Array});
984 }
985 
986 Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
987   assert(!PollyManagedMemory &&
988          "Managed memory does not allocate or free memory "
989          "for device");
990   const char *Name = "polly_allocateMemoryForDevice";
991   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
992   Function *F = M->getFunction(Name);
993 
994   // If F is not available, declare it.
995   if (!F) {
996     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
997     std::vector<Type *> Args;
998     Args.push_back(Builder.getInt64Ty());
999     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
1000     F = Function::Create(Ty, Linkage, Name, M);
1001   }
1002 
1003   return Builder.CreateCall(F, {Size});
1004 }
1005 
1006 void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
1007                                                     Value *DeviceData,
1008                                                     Value *Size) {
1009   assert(!PollyManagedMemory &&
1010          "Managed memory does not transfer memory between "
1011          "device and host");
1012   const char *Name = "polly_copyFromHostToDevice";
1013   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1014   Function *F = M->getFunction(Name);
1015 
1016   // If F is not available, declare it.
1017   if (!F) {
1018     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1019     std::vector<Type *> Args;
1020     Args.push_back(Builder.getInt8PtrTy());
1021     Args.push_back(Builder.getInt8PtrTy());
1022     Args.push_back(Builder.getInt64Ty());
1023     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1024     F = Function::Create(Ty, Linkage, Name, M);
1025   }
1026 
1027   Builder.CreateCall(F, {HostData, DeviceData, Size});
1028 }
1029 
1030 void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
1031                                                     Value *HostData,
1032                                                     Value *Size) {
1033   assert(!PollyManagedMemory &&
1034          "Managed memory does not transfer memory between "
1035          "device and host");
1036   const char *Name = "polly_copyFromDeviceToHost";
1037   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1038   Function *F = M->getFunction(Name);
1039 
1040   // If F is not available, declare it.
1041   if (!F) {
1042     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1043     std::vector<Type *> Args;
1044     Args.push_back(Builder.getInt8PtrTy());
1045     Args.push_back(Builder.getInt8PtrTy());
1046     Args.push_back(Builder.getInt64Ty());
1047     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1048     F = Function::Create(Ty, Linkage, Name, M);
1049   }
1050 
1051   Builder.CreateCall(F, {DeviceData, HostData, Size});
1052 }
1053 
1054 void GPUNodeBuilder::createCallSynchronizeDevice() {
1055   assert(PollyManagedMemory && "explicit synchronization is only necessary for "
1056                                "managed memory");
1057   const char *Name = "polly_synchronizeDevice";
1058   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1059   Function *F = M->getFunction(Name);
1060 
1061   // If F is not available, declare it.
1062   if (!F) {
1063     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1064     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
1065     F = Function::Create(Ty, Linkage, Name, M);
1066   }
1067 
1068   Builder.CreateCall(F);
1069 }
1070 
1071 Value *GPUNodeBuilder::createCallInitContext() {
1072   const char *Name;
1073 
1074   switch (Runtime) {
1075   case GPURuntime::CUDA:
1076     Name = "polly_initContextCUDA";
1077     break;
1078   case GPURuntime::OpenCL:
1079     Name = "polly_initContextCL";
1080     break;
1081   }
1082 
1083   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1084   Function *F = M->getFunction(Name);
1085 
1086   // If F is not available, declare it.
1087   if (!F) {
1088     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1089     std::vector<Type *> Args;
1090     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
1091     F = Function::Create(Ty, Linkage, Name, M);
1092   }
1093 
1094   return Builder.CreateCall(F, {});
1095 }
1096 
1097 void GPUNodeBuilder::createCallFreeContext(Value *Context) {
1098   const char *Name = "polly_freeContext";
1099   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1100   Function *F = M->getFunction(Name);
1101 
1102   // If F is not available, declare it.
1103   if (!F) {
1104     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1105     std::vector<Type *> Args;
1106     Args.push_back(Builder.getInt8PtrTy());
1107     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1108     F = Function::Create(Ty, Linkage, Name, M);
1109   }
1110 
1111   Builder.CreateCall(F, {Context});
1112 }
1113 
1114 /// Check if one string is a prefix of another.
1115 ///
1116 /// @param String The string in which to look for the prefix.
1117 /// @param Prefix The prefix to look for.
1118 static bool isPrefix(std::string String, std::string Prefix) {
1119   return String.find(Prefix) == 0;
1120 }
1121 
1122 Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
1123   isl::ast_build Build = isl::ast_build::from_context(S.getContext());
1124   Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);
1125 
1126   if (!gpu_array_is_scalar(Array)) {
1127     isl::multi_pw_aff ArrayBound = isl::manage_copy(Array->bound);
1128 
1129     isl::pw_aff OffsetDimZero = ArrayBound.at(0);
1130     isl::ast_expr Res = Build.expr_from(OffsetDimZero);
1131 
1132     for (unsigned int i = 1; i < Array->n_index; i++) {
1133       isl::pw_aff Bound_I = ArrayBound.at(i);
1134       isl::ast_expr Expr = Build.expr_from(Bound_I);
1135       Res = Res.mul(Expr);
1136     }
1137 
1138     Value *NumElements = ExprBuilder.create(Res.release());
1139     if (NumElements->getType() != ArraySize->getType())
1140       NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
1141     ArraySize = Builder.CreateMul(ArraySize, NumElements);
1142   }
1143   return ArraySize;
1144 }
1145 
1146 Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
1147   if (gpu_array_is_scalar(Array))
1148     return nullptr;
1149 
1150   isl::ast_build Build = isl::ast_build::from_context(S.getContext());
1151 
1152   isl::set Min = isl::manage_copy(Array->extent).lexmin();
1153 
1154   isl::set ZeroSet = isl::set::universe(Min.get_space());
1155 
1156   for (unsigned i : rangeIslSize(0, Min.tuple_dim()))
1157     ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0);
1158 
1159   if (Min.is_subset(ZeroSet)) {
1160     return nullptr;
1161   }
1162 
1163   isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.ctx(), 0));
1164 
1165   for (unsigned i : rangeIslSize(0, Min.tuple_dim())) {
1166     if (i > 0) {
1167       isl::pw_aff Bound_I =
1168           isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1));
1169       isl::ast_expr BExpr = Build.expr_from(Bound_I);
1170       Result = Result.mul(BExpr);
1171     }
1172     isl::pw_aff DimMin = Min.dim_min(i);
1173     isl::ast_expr MExpr = Build.expr_from(DimMin);
1174     Result = Result.add(MExpr);
1175   }
1176 
1177   return ExprBuilder.create(Result.release());
1178 }
1179 
1180 Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
1181                                              ScopArrayInfo *ArrayInfo) {
1182   assert(PollyManagedMemory && "Only used when you wish to get a host "
1183                                "pointer for sending data to the kernel, "
1184                                "with managed memory");
1185   std::map<ScopArrayInfo *, Value *>::iterator it;
1186   it = DeviceAllocations.find(ArrayInfo);
1187   assert(it != DeviceAllocations.end() &&
1188          "Device array expected to be available");
1189   return it->second;
1190 }
1191 
1192 void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
1193                                         enum DataDirection Direction) {
1194   assert(!PollyManagedMemory && "Managed memory needs no data transfers");
1195   isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
1196   isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
1197   isl_id *Id = isl_ast_expr_get_id(Arg);
1198   auto Array = (gpu_array_info *)isl_id_get_user(Id);
1199   auto ScopArray = (ScopArrayInfo *)(Array->user);
1200 
1201   Value *Size = getArraySize(Array);
1202   Value *Offset = getArrayOffset(Array);
1203   Value *DevPtr = DeviceAllocations[ScopArray];
1204 
1205   Value *HostPtr;
1206 
1207   if (gpu_array_is_scalar(Array))
1208     HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
1209   else
1210     HostPtr = ScopArray->getBasePtr();
1211   HostPtr = getLatestValue(HostPtr);
1212 
1213   if (Offset) {
1214     HostPtr = Builder.CreatePointerCast(
1215         HostPtr, ScopArray->getElementType()->getPointerTo());
1216     HostPtr = Builder.CreateGEP(ScopArray->getElementType(), HostPtr, Offset);
1217   }
1218 
1219   HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
1220 
1221   if (Offset) {
1222     Size = Builder.CreateSub(
1223         Size, Builder.CreateMul(
1224                   Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
1225   }
1226 
1227   if (Direction == HOST_TO_DEVICE)
1228     createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
1229   else
1230     createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);
1231 
1232   isl_id_free(Id);
1233   isl_ast_expr_free(Arg);
1234   isl_ast_expr_free(Expr);
1235   isl_ast_node_free(TransferStmt);
1236 }
1237 
1238 void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
1239   isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
1240   isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
1241   isl_id *Id = isl_ast_expr_get_id(StmtExpr);
1242   isl_id_free(Id);
1243   isl_ast_expr_free(StmtExpr);
1244 
1245   const char *Str = isl_id_get_name(Id);
1246   if (!strcmp(Str, "kernel")) {
1247     createKernel(UserStmt);
1248     if (PollyManagedMemory)
1249       createCallSynchronizeDevice();
1250     isl_ast_expr_free(Expr);
1251     return;
1252   }
1253   if (!strcmp(Str, "init_device")) {
1254     initializeAfterRTH();
1255     isl_ast_node_free(UserStmt);
1256     isl_ast_expr_free(Expr);
1257     return;
1258   }
1259   if (!strcmp(Str, "clear_device")) {
1260     finalize();
1261     isl_ast_node_free(UserStmt);
1262     isl_ast_expr_free(Expr);
1263     return;
1264   }
1265   if (isPrefix(Str, "to_device")) {
1266     if (!PollyManagedMemory)
1267       createDataTransfer(UserStmt, HOST_TO_DEVICE);
1268     else
1269       isl_ast_node_free(UserStmt);
1270 
1271     isl_ast_expr_free(Expr);
1272     return;
1273   }
1274 
1275   if (isPrefix(Str, "from_device")) {
1276     if (!PollyManagedMemory) {
1277       createDataTransfer(UserStmt, DEVICE_TO_HOST);
1278     } else {
1279       isl_ast_node_free(UserStmt);
1280     }
1281     isl_ast_expr_free(Expr);
1282     return;
1283   }
1284 
1285   isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
1286   struct ppcg_kernel_stmt *KernelStmt =
1287       (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
1288   isl_id_free(Anno);
1289 
1290   switch (KernelStmt->type) {
1291   case ppcg_kernel_domain:
1292     createScopStmt(Expr, KernelStmt);
1293     isl_ast_node_free(UserStmt);
1294     return;
1295   case ppcg_kernel_copy:
1296     createKernelCopy(KernelStmt);
1297     isl_ast_expr_free(Expr);
1298     isl_ast_node_free(UserStmt);
1299     return;
1300   case ppcg_kernel_sync:
1301     createKernelSync();
1302     isl_ast_expr_free(Expr);
1303     isl_ast_node_free(UserStmt);
1304     return;
1305   }
1306 
1307   isl_ast_expr_free(Expr);
1308   isl_ast_node_free(UserStmt);
1309 }
1310 
1311 void GPUNodeBuilder::createFor(__isl_take isl_ast_node *Node) {
1312   createForSequential(isl::manage(Node).as<isl::ast_node_for>(), false);
1313 }
1314 
1315 void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
1316   isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
1317   LocalIndex = isl_ast_expr_address_of(LocalIndex);
1318   Value *LocalAddr = ExprBuilder.create(LocalIndex);
1319   isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
1320   Index = isl_ast_expr_address_of(Index);
1321   Value *GlobalAddr = ExprBuilder.create(Index);
1322   Type *IndexTy = GlobalAddr->getType()->getPointerElementType();
1323 
1324   if (KernelStmt->u.c.read) {
1325     LoadInst *Load = Builder.CreateLoad(IndexTy, GlobalAddr, "shared.read");
1326     Builder.CreateStore(Load, LocalAddr);
1327   } else {
1328     LoadInst *Load = Builder.CreateLoad(IndexTy, LocalAddr, "shared.write");
1329     Builder.CreateStore(Load, GlobalAddr);
1330   }
1331 }
1332 
1333 void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
1334                                     ppcg_kernel_stmt *KernelStmt) {
1335   auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
1336   isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;
1337 
1338   LoopToScevMapT LTS;
1339   LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());
1340 
1341   createSubstitutions(Expr, Stmt, LTS);
1342 
1343   if (Stmt->isBlockStmt())
1344     BlockGen.copyStmt(*Stmt, LTS, Indexes);
1345   else
1346     RegionGen.copyStmt(*Stmt, LTS, Indexes);
1347 }
1348 
1349 void GPUNodeBuilder::createKernelSync() {
1350   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1351   const char *SpirName = "__gen_ocl_barrier_global";
1352 
1353   Function *Sync;
1354 
1355   switch (Arch) {
1356   case GPUArch::SPIR64:
1357   case GPUArch::SPIR32:
1358     Sync = M->getFunction(SpirName);
1359 
1360     // If Sync is not available, declare it.
1361     if (!Sync) {
1362       GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1363       std::vector<Type *> Args;
1364       FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1365       Sync = Function::Create(Ty, Linkage, SpirName, M);
1366       Sync->setCallingConv(CallingConv::SPIR_FUNC);
1367     }
1368     break;
1369   case GPUArch::NVPTX64:
1370     Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
1371     break;
1372   }
1373 
1374   Builder.CreateCall(Sync, {});
1375 }
1376 
1377 /// Collect llvm::Values referenced from @p Node
1378 ///
1379 /// This function only applies to isl_ast_nodes that are user_nodes referring
1380 /// to a ScopStmt. All other node types are ignore.
1381 ///
1382 /// @param Node The node to collect references for.
1383 /// @param User A user pointer used as storage for the data that is collected.
1384 ///
1385 /// @returns isl_bool_true if data could be collected successfully.
1386 isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
1387   if (isl_ast_node_get_type(Node) != isl_ast_node_user)
1388     return isl_bool_true;
1389 
1390   isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
1391   isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
1392   isl_id *Id = isl_ast_expr_get_id(StmtExpr);
1393   const char *Str = isl_id_get_name(Id);
1394   isl_id_free(Id);
1395   isl_ast_expr_free(StmtExpr);
1396   isl_ast_expr_free(Expr);
1397 
1398   if (!isPrefix(Str, "Stmt"))
1399     return isl_bool_true;
1400 
1401   Id = isl_ast_node_get_annotation(Node);
1402   auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
1403   auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
1404   isl_id_free(Id);
1405 
1406   addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);
1407 
1408   return isl_bool_true;
1409 }
1410 
1411 /// A list of functions that are available in NVIDIA's libdevice.
1412 const std::set<std::string> CUDALibDeviceFunctions = {
1413     "exp",      "expf",      "expl",      "cos", "cosf", "sqrt", "sqrtf",
1414     "copysign", "copysignf", "copysignl", "log", "logf", "powi", "powif"};
1415 
1416 // A map from intrinsics to their corresponding libdevice functions.
1417 const std::map<std::string, std::string> IntrinsicToLibdeviceFunc = {
1418     {"llvm.exp.f64", "exp"},
1419     {"llvm.exp.f32", "expf"},
1420     {"llvm.powi.f64.i32", "powi"},
1421     {"llvm.powi.f32.i32", "powif"}};
1422 
1423 /// Return the corresponding CUDA libdevice function name @p Name.
1424 /// Note that this function will try to convert instrinsics in the list
1425 /// IntrinsicToLibdeviceFunc into libdevice functions.
1426 /// This is because some intrinsics such as `exp`
1427 /// are not supported by the NVPTX backend.
1428 /// If this restriction of the backend is lifted, we should refactor our code
1429 /// so that we use intrinsics whenever possible.
1430 ///
1431 /// Return "" if we are not compiling for CUDA.
1432 std::string getCUDALibDeviceFuntion(StringRef NameRef) {
1433   std::string Name = NameRef.str();
1434   auto It = IntrinsicToLibdeviceFunc.find(Name);
1435   if (It != IntrinsicToLibdeviceFunc.end())
1436     return getCUDALibDeviceFuntion(It->second);
1437 
1438   if (CUDALibDeviceFunctions.count(Name))
1439     return ("__nv_" + Name);
1440 
1441   return "";
1442 }
1443 
1444 /// Check if F is a function that we can code-generate in a GPU kernel.
1445 static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
1446   assert(F && "F is an invalid pointer");
1447   // We string compare against the name of the function to allow
1448   // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
1449   // "llvm.copysign".
1450   const StringRef Name = F->getName();
1451 
1452   if (AllowLibDevice && getCUDALibDeviceFuntion(Name).length() > 0)
1453     return true;
1454 
1455   return F->isIntrinsic() &&
1456          (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
1457           Name.startswith("llvm.copysign"));
1458 }
1459 
1460 /// Do not take `Function` as a subtree value.
1461 ///
1462 /// We try to take the reference of all subtree values and pass them along
1463 /// to the kernel from the host. Taking an address of any function and
1464 /// trying to pass along is nonsensical. Only allow `Value`s that are not
1465 /// `Function`s.
1466 static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }
1467 
1468 /// Return `Function`s from `RawSubtreeValues`.
1469 static SetVector<Function *>
1470 getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
1471                                  bool AllowCUDALibDevice) {
1472   SetVector<Function *> SubtreeFunctions;
1473   for (Value *It : RawSubtreeValues) {
1474     Function *F = dyn_cast<Function>(It);
1475     if (F) {
1476       assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
1477              "Code should have bailed out by "
1478              "this point if an invalid function "
1479              "were present in a kernel.");
1480       SubtreeFunctions.insert(F);
1481     }
1482   }
1483   return SubtreeFunctions;
1484 }
1485 
1486 std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
1487            isl::space>
1488 GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
1489   SetVector<Value *> SubtreeValues;
1490   SetVector<const SCEV *> SCEVs;
1491   SetVector<const Loop *> Loops;
1492   isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params();
1493   SubtreeReferences References = {
1494       LI,         SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(),
1495       &ParamSpace};
1496 
1497   for (const auto &I : IDToValue)
1498     SubtreeValues.insert(I.second);
1499 
1500   // NOTE: this is populated in IslNodeBuilder::addParameters
1501   // See [Code generation of induction variables of loops outside Scops].
1502   for (const auto &I : OutsideLoopIterations)
1503     SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());
1504 
1505   isl_ast_node_foreach_descendant_top_down(
1506       Kernel->tree, collectReferencesInGPUStmt, &References);
1507 
1508   for (const SCEV *Expr : SCEVs) {
1509     findValues(Expr, SE, SubtreeValues);
1510     findLoops(Expr, Loops);
1511   }
1512 
1513   Loops.remove_if([this](const Loop *L) {
1514     return S.contains(L) || L->contains(S.getEntry());
1515   });
1516 
1517   for (auto &SAI : S.arrays())
1518     SubtreeValues.remove(SAI->getBasePtr());
1519 
1520   isl_space *Space = S.getParamSpace().release();
1521   for (long i = 0, n = isl_space_dim(Space, isl_dim_param); i < n; i++) {
1522     isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
1523     assert(IDToValue.count(Id));
1524     Value *Val = IDToValue[Id];
1525     SubtreeValues.remove(Val);
1526     isl_id_free(Id);
1527   }
1528   isl_space_free(Space);
1529 
1530   for (long i = 0, n = isl_space_dim(Kernel->space, isl_dim_set); i < n; i++) {
1531     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
1532     assert(IDToValue.count(Id));
1533     Value *Val = IDToValue[Id];
1534     SubtreeValues.remove(Val);
1535     isl_id_free(Id);
1536   }
1537 
1538   // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
1539   // SubtreeValues. This is important, because we should not lose any
1540   // SubtreeValues in the process of constructing the
1541   // "ValidSubtree{Values, Functions} sets. Nor should the set
1542   // ValidSubtree{Values, Functions} have any common element.
1543   auto ValidSubtreeValuesIt =
1544       make_filter_range(SubtreeValues, isValidSubtreeValue);
1545   SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
1546                                         ValidSubtreeValuesIt.end());
1547 
1548   bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;
1549 
1550   SetVector<Function *> ValidSubtreeFunctions(
1551       getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));
1552 
1553   // @see IslNodeBuilder::getReferencesInSubtree
1554   SetVector<Value *> ReplacedValues;
1555   for (Value *V : ValidSubtreeValues) {
1556     auto It = ValueMap.find(V);
1557     if (It == ValueMap.end())
1558       ReplacedValues.insert(V);
1559     else
1560       ReplacedValues.insert(It->second);
1561   }
1562   return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops,
1563                          ParamSpace);
1564 }
1565 
1566 void GPUNodeBuilder::clearDominators(Function *F) {
1567   DomTreeNode *N = DT.getNode(&F->getEntryBlock());
1568   std::vector<BasicBlock *> Nodes;
1569   for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
1570     Nodes.push_back(I->getBlock());
1571 
1572   for (BasicBlock *BB : Nodes)
1573     DT.eraseNode(BB);
1574 }
1575 
1576 void GPUNodeBuilder::clearScalarEvolution(Function *F) {
1577   for (BasicBlock &BB : *F) {
1578     Loop *L = LI.getLoopFor(&BB);
1579     if (L)
1580       SE.forgetLoop(L);
1581   }
1582 }
1583 
1584 void GPUNodeBuilder::clearLoops(Function *F) {
1585   SmallSet<Loop *, 1> WorkList;
1586   for (BasicBlock &BB : *F) {
1587     Loop *L = LI.getLoopFor(&BB);
1588     if (L)
1589       WorkList.insert(L);
1590   }
1591   for (auto *L : WorkList)
1592     LI.erase(L);
1593 }
1594 
1595 std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
1596   std::vector<Value *> Sizes;
1597   isl::ast_build Context = isl::ast_build::from_context(S.getContext());
1598 
1599   isl::multi_pw_aff GridSizePwAffs = isl::manage_copy(Kernel->grid_size);
1600   for (long i = 0; i < Kernel->n_grid; i++) {
1601     isl::pw_aff Size = GridSizePwAffs.at(i);
1602     isl::ast_expr GridSize = Context.expr_from(Size);
1603     Value *Res = ExprBuilder.create(GridSize.release());
1604     Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
1605     Sizes.push_back(Res);
1606   }
1607 
1608   for (long i = Kernel->n_grid; i < 3; i++)
1609     Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));
1610 
1611   return std::make_tuple(Sizes[0], Sizes[1]);
1612 }
1613 
1614 std::tuple<Value *, Value *, Value *>
1615 GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
1616   std::vector<Value *> Sizes;
1617 
1618   for (long i = 0; i < Kernel->n_block; i++) {
1619     Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
1620     Sizes.push_back(Res);
1621   }
1622 
1623   for (long i = Kernel->n_block; i < 3; i++)
1624     Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));
1625 
1626   return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
1627 }
1628 
1629 void GPUNodeBuilder::insertStoreParameter(Type *ArrayTy,
1630                                           Instruction *Parameters,
1631                                           Instruction *Param, int Index) {
1632   Value *Slot = Builder.CreateGEP(
1633       ArrayTy, Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
1634   Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
1635   Builder.CreateStore(ParamTyped, Slot);
1636 }
1637 
1638 Value *
1639 GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
1640                                        SetVector<Value *> SubtreeValues) {
1641   const int NumArgs = F->arg_size();
1642   std::vector<int> ArgSizes(NumArgs);
1643 
1644   // If we are using the OpenCL Runtime, we need to add the kernel argument
1645   // sizes to the end of the launch-parameter list, so OpenCL can determine
1646   // how big the respective kernel arguments are.
1647   // Here we need to reserve adequate space for that.
1648   Type *ArrayTy;
1649   if (Runtime == GPURuntime::OpenCL)
1650     ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);
1651   else
1652     ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs);
1653 
1654   BasicBlock *EntryBlock =
1655       &Builder.GetInsertBlock()->getParent()->getEntryBlock();
1656   auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
1657   std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
1658   Instruction *Parameters = new AllocaInst(
1659       ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());
1660 
1661   int Index = 0;
1662   for (long i = 0; i < Prog->n_array; i++) {
1663     if (!ppcg_kernel_requires_array_argument(Kernel, i))
1664       continue;
1665 
1666     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
1667     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
1668 
1669     if (Runtime == GPURuntime::OpenCL)
1670       ArgSizes[Index] = SAI->getElemSizeInBytes();
1671 
1672     Value *DevArray = nullptr;
1673     if (PollyManagedMemory) {
1674       DevArray = getManagedDeviceArray(&Prog->array[i],
1675                                        const_cast<ScopArrayInfo *>(SAI));
1676     } else {
1677       DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
1678       DevArray = createCallGetDevicePtr(DevArray);
1679     }
1680     assert(DevArray != nullptr && "Array to be offloaded to device not "
1681                                   "initialized");
1682     Value *Offset = getArrayOffset(&Prog->array[i]);
1683 
1684     if (Offset) {
1685       DevArray = Builder.CreatePointerCast(
1686           DevArray, SAI->getElementType()->getPointerTo());
1687       DevArray = Builder.CreateGEP(SAI->getElementType(), DevArray,
1688                                    Builder.CreateNeg(Offset));
1689       DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
1690     }
1691     Value *Slot = Builder.CreateGEP(
1692         ArrayTy, Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
1693 
1694     if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
1695       Value *ValPtr = nullptr;
1696       if (PollyManagedMemory)
1697         ValPtr = DevArray;
1698       else
1699         ValPtr = BlockGen.getOrCreateAlloca(SAI);
1700 
1701       assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
1702                                   " to be stored into Parameters");
1703       Value *ValPtrCast =
1704           Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
1705       Builder.CreateStore(ValPtrCast, Slot);
1706     } else {
1707       Instruction *Param =
1708           new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
1709                          Launch + "_param_" + std::to_string(Index),
1710                          EntryBlock->getTerminator());
1711       Builder.CreateStore(DevArray, Param);
1712       Value *ParamTyped =
1713           Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
1714       Builder.CreateStore(ParamTyped, Slot);
1715     }
1716     Index++;
1717   }
1718 
1719   int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);
1720 
1721   for (long i = 0; i < NumHostIters; i++) {
1722     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
1723     Value *Val = IDToValue[Id];
1724     isl_id_free(Id);
1725 
1726     if (Runtime == GPURuntime::OpenCL)
1727       ArgSizes[Index] = computeSizeInBytes(Val->getType());
1728 
1729     Instruction *Param =
1730         new AllocaInst(Val->getType(), AddressSpace,
1731                        Launch + "_param_" + std::to_string(Index),
1732                        EntryBlock->getTerminator());
1733     Builder.CreateStore(Val, Param);
1734     insertStoreParameter(ArrayTy, Parameters, Param, Index);
1735     Index++;
1736   }
1737 
1738   int NumVars = isl_space_dim(Kernel->space, isl_dim_param);
1739 
1740   for (long i = 0; i < NumVars; i++) {
1741     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
1742     Value *Val = IDToValue[Id];
1743     if (ValueMap.count(Val))
1744       Val = ValueMap[Val];
1745     isl_id_free(Id);
1746 
1747     if (Runtime == GPURuntime::OpenCL)
1748       ArgSizes[Index] = computeSizeInBytes(Val->getType());
1749 
1750     Instruction *Param =
1751         new AllocaInst(Val->getType(), AddressSpace,
1752                        Launch + "_param_" + std::to_string(Index),
1753                        EntryBlock->getTerminator());
1754     Builder.CreateStore(Val, Param);
1755     insertStoreParameter(ArrayTy, Parameters, Param, Index);
1756     Index++;
1757   }
1758 
1759   for (auto Val : SubtreeValues) {
1760     if (Runtime == GPURuntime::OpenCL)
1761       ArgSizes[Index] = computeSizeInBytes(Val->getType());
1762 
1763     Instruction *Param =
1764         new AllocaInst(Val->getType(), AddressSpace,
1765                        Launch + "_param_" + std::to_string(Index),
1766                        EntryBlock->getTerminator());
1767     Builder.CreateStore(Val, Param);
1768     insertStoreParameter(ArrayTy, Parameters, Param, Index);
1769     Index++;
1770   }
1771 
1772   if (Runtime == GPURuntime::OpenCL) {
1773     for (int i = 0; i < NumArgs; i++) {
1774       Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
1775       Instruction *Param =
1776           new AllocaInst(Builder.getInt32Ty(), AddressSpace,
1777                          Launch + "_param_size_" + std::to_string(i),
1778                          EntryBlock->getTerminator());
1779       Builder.CreateStore(Val, Param);
1780       insertStoreParameter(ArrayTy, Parameters, Param, Index);
1781       Index++;
1782     }
1783   }
1784 
1785   auto Location = EntryBlock->getTerminator();
1786   return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
1787                          Launch + "_params_i8ptr", Location);
1788 }
1789 
1790 void GPUNodeBuilder::setupKernelSubtreeFunctions(
1791     SetVector<Function *> SubtreeFunctions) {
1792   for (auto Fn : SubtreeFunctions) {
1793     const std::string ClonedFnName = Fn->getName().str();
1794     Function *Clone = GPUModule->getFunction(ClonedFnName);
1795     if (!Clone)
1796       Clone =
1797           Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
1798                            ClonedFnName, GPUModule.get());
1799     assert(Clone && "Expected cloned function to be initialized.");
1800     assert(ValueMap.find(Fn) == ValueMap.end() &&
1801            "Fn already present in ValueMap");
1802     ValueMap[Fn] = Clone;
1803   }
1804 }
1805 void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
1806   isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
1807   ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
1808   isl_id_free(Id);
1809   isl_ast_node_free(KernelStmt);
1810 
1811   if (Kernel->n_grid > 1)
1812     DeepestParallel = std::max(
1813         DeepestParallel, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));
1814   else
1815     DeepestSequential = std::max(
1816         DeepestSequential, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));
1817 
1818   Value *BlockDimX, *BlockDimY, *BlockDimZ;
1819   std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);
1820 
1821   SetVector<Value *> SubtreeValues;
1822   SetVector<Function *> SubtreeFunctions;
1823   SetVector<const Loop *> Loops;
1824   isl::space ParamSpace;
1825   std::tie(SubtreeValues, SubtreeFunctions, Loops, ParamSpace) =
1826       getReferencesInKernel(Kernel);
1827 
1828   // Add parameters that appear only in the access function to the kernel
1829   // space. This is important to make sure that all isl_ids are passed as
1830   // parameters to the kernel, even though we may not have all parameters
1831   // in the context to improve compile time.
1832   Kernel->space = isl_space_align_params(Kernel->space, ParamSpace.release());
1833 
1834   assert(Kernel->tree && "Device AST of kernel node is empty");
1835 
1836   Instruction &HostInsertPoint = *Builder.GetInsertPoint();
1837   IslExprBuilder::IDToValueTy HostIDs = IDToValue;
1838   ValueMapT HostValueMap = ValueMap;
1839   BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
1840   ScalarMap.clear();
1841   BlockGenerator::EscapeUsersAllocaMapTy HostEscapeMap = EscapeMap;
1842   EscapeMap.clear();
1843 
1844   // Create for all loops we depend on values that contain the current loop
1845   // iteration. These values are necessary to generate code for SCEVs that
1846   // depend on such loops. As a result we need to pass them to the subfunction.
1847   for (const Loop *L : Loops) {
1848     const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
1849                                             SE.getUnknown(Builder.getInt64(1)),
1850                                             L, SCEV::FlagAnyWrap);
1851     Value *V = generateSCEV(OuterLIV);
1852     OutsideLoopIterations[L] = SE.getUnknown(V);
1853     SubtreeValues.insert(V);
1854   }
1855 
1856   createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
1857   setupKernelSubtreeFunctions(SubtreeFunctions);
1858 
1859   create(isl_ast_node_copy(Kernel->tree));
1860 
1861   finalizeKernelArguments(Kernel);
1862   Function *F = Builder.GetInsertBlock()->getParent();
1863   if (Arch == GPUArch::NVPTX64)
1864     addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
1865   clearDominators(F);
1866   clearScalarEvolution(F);
1867   clearLoops(F);
1868 
1869   IDToValue = HostIDs;
1870 
1871   ValueMap = std::move(HostValueMap);
1872   ScalarMap = std::move(HostScalarMap);
1873   EscapeMap = std::move(HostEscapeMap);
1874   IDToSAI.clear();
1875   Annotator.resetAlternativeAliasBases();
1876   for (auto &BasePtr : LocalArrays)
1877     S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
1878   LocalArrays.clear();
1879 
1880   std::string ASMString = finalizeKernelFunction();
1881   Builder.SetInsertPoint(&HostInsertPoint);
1882   Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);
1883 
1884   std::string Name = getKernelFuncName(Kernel->id);
1885   Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
1886   Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
1887   Value *GPUKernel = createCallGetKernel(KernelString, NameString);
1888 
1889   Value *GridDimX, *GridDimY;
1890   std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);
1891 
1892   createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
1893                          BlockDimZ, Parameters);
1894   createCallFreeKernel(GPUKernel);
1895 
1896   for (auto Id : KernelIds)
1897     isl_id_free(Id);
1898 
1899   KernelIds.clear();
1900 }
1901 
1902 /// Compute the DataLayout string for the NVPTX backend.
1903 ///
1904 /// @param is64Bit Are we looking for a 64 bit architecture?
1905 static std::string computeNVPTXDataLayout(bool is64Bit) {
1906   std::string Ret = "";
1907 
1908   if (!is64Bit) {
1909     Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1910            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1911            "64-v128:128:128-n16:32:64";
1912   } else {
1913     Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1914            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1915            "64-v128:128:128-n16:32:64";
1916   }
1917 
1918   return Ret;
1919 }
1920 
1921 /// Compute the DataLayout string for a SPIR kernel.
1922 ///
1923 /// @param is64Bit Are we looking for a 64 bit architecture?
1924 static std::string computeSPIRDataLayout(bool is64Bit) {
1925   std::string Ret = "";
1926 
1927   if (!is64Bit) {
1928     Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1929            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
1930            "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
1931            "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
1932   } else {
1933     Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1934            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
1935            "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
1936            "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
1937   }
1938 
1939   return Ret;
1940 }
1941 
1942 Function *
1943 GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
1944                                          SetVector<Value *> &SubtreeValues) {
1945   std::vector<Type *> Args;
1946   std::string Identifier = getKernelFuncName(Kernel->id);
1947 
1948   std::vector<Metadata *> MemoryType;
1949 
1950   for (long i = 0; i < Prog->n_array; i++) {
1951     if (!ppcg_kernel_requires_array_argument(Kernel, i))
1952       continue;
1953 
1954     if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
1955       isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
1956       const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
1957       Args.push_back(SAI->getElementType());
1958       MemoryType.push_back(
1959           ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1960     } else {
1961       static const int UseGlobalMemory = 1;
1962       Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
1963       MemoryType.push_back(
1964           ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
1965     }
1966   }
1967 
1968   int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);
1969 
1970   for (long i = 0; i < NumHostIters; i++) {
1971     Args.push_back(Builder.getInt64Ty());
1972     MemoryType.push_back(
1973         ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1974   }
1975 
1976   int NumVars = isl_space_dim(Kernel->space, isl_dim_param);
1977 
1978   for (long i = 0; i < NumVars; i++) {
1979     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
1980     Value *Val = IDToValue[Id];
1981     isl_id_free(Id);
1982     Args.push_back(Val->getType());
1983     MemoryType.push_back(
1984         ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1985   }
1986 
1987   for (auto *V : SubtreeValues) {
1988     Args.push_back(V->getType());
1989     MemoryType.push_back(
1990         ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1991   }
1992 
1993   auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
1994   auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
1995                               GPUModule.get());
1996 
1997   std::vector<Metadata *> EmptyStrings;
1998 
1999   for (unsigned int i = 0; i < MemoryType.size(); i++) {
2000     EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
2001   }
2002 
2003   if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
2004     FN->setMetadata("kernel_arg_addr_space",
2005                     MDNode::get(FN->getContext(), MemoryType));
2006     FN->setMetadata("kernel_arg_name",
2007                     MDNode::get(FN->getContext(), EmptyStrings));
2008     FN->setMetadata("kernel_arg_access_qual",
2009                     MDNode::get(FN->getContext(), EmptyStrings));
2010     FN->setMetadata("kernel_arg_type",
2011                     MDNode::get(FN->getContext(), EmptyStrings));
2012     FN->setMetadata("kernel_arg_type_qual",
2013                     MDNode::get(FN->getContext(), EmptyStrings));
2014     FN->setMetadata("kernel_arg_base_type",
2015                     MDNode::get(FN->getContext(), EmptyStrings));
2016   }
2017 
2018   switch (Arch) {
2019   case GPUArch::NVPTX64:
2020     FN->setCallingConv(CallingConv::PTX_Kernel);
2021     break;
2022   case GPUArch::SPIR32:
2023   case GPUArch::SPIR64:
2024     FN->setCallingConv(CallingConv::SPIR_KERNEL);
2025     break;
2026   }
2027 
2028   auto Arg = FN->arg_begin();
2029   for (long i = 0; i < Kernel->n_array; i++) {
2030     if (!ppcg_kernel_requires_array_argument(Kernel, i))
2031       continue;
2032 
2033     Arg->setName(Kernel->array[i].array->name);
2034 
2035     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2036     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2037     Type *EleTy = SAI->getElementType();
2038     Value *Val = &*Arg;
2039     SmallVector<const SCEV *, 4> Sizes;
2040     isl_ast_build *Build =
2041         isl_ast_build_from_context(isl_set_copy(Prog->context));
2042     Sizes.push_back(nullptr);
2043     for (long j = 1, n = Kernel->array[i].array->n_index; j < n; j++) {
2044       isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
2045           Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
2046       auto V = ExprBuilder.create(DimSize);
2047       Sizes.push_back(SE.getSCEV(V));
2048     }
2049     const ScopArrayInfo *SAIRep =
2050         S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
2051     LocalArrays.push_back(Val);
2052 
2053     isl_ast_build_free(Build);
2054     KernelIds.push_back(Id);
2055     IDToSAI[Id] = SAIRep;
2056     Arg++;
2057   }
2058 
2059   for (long i = 0; i < NumHostIters; i++) {
2060     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
2061     Arg->setName(isl_id_get_name(Id));
2062     IDToValue[Id] = &*Arg;
2063     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2064     Arg++;
2065   }
2066 
2067   for (long i = 0; i < NumVars; i++) {
2068     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
2069     Arg->setName(isl_id_get_name(Id));
2070     Value *Val = IDToValue[Id];
2071     ValueMap[Val] = &*Arg;
2072     IDToValue[Id] = &*Arg;
2073     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2074     Arg++;
2075   }
2076 
2077   for (auto *V : SubtreeValues) {
2078     Arg->setName(V->getName());
2079     ValueMap[V] = &*Arg;
2080     Arg++;
2081   }
2082 
2083   return FN;
2084 }
2085 
2086 void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
2087   Intrinsic::ID IntrinsicsBID[2];
2088   Intrinsic::ID IntrinsicsTID[3];
2089 
2090   switch (Arch) {
2091   case GPUArch::SPIR64:
2092   case GPUArch::SPIR32:
2093     llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
2094   case GPUArch::NVPTX64:
2095     IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
2096     IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;
2097 
2098     IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
2099     IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
2100     IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
2101     break;
2102   }
2103 
2104   auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
2105     std::string Name = isl_id_get_name(Id);
2106     Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2107     Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
2108     Value *Val = Builder.CreateCall(IntrinsicFn, {});
2109     Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
2110     IDToValue[Id] = Val;
2111     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2112   };
2113 
2114   for (int i = 0; i < Kernel->n_grid; ++i) {
2115     isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
2116     addId(Id, IntrinsicsBID[i]);
2117   }
2118 
2119   for (int i = 0; i < Kernel->n_block; ++i) {
2120     isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
2121     addId(Id, IntrinsicsTID[i]);
2122   }
2123 }
2124 
2125 void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel,
2126                                            bool SizeTypeIs64bit) {
2127   const char *GroupName[3] = {"__gen_ocl_get_group_id0",
2128                               "__gen_ocl_get_group_id1",
2129                               "__gen_ocl_get_group_id2"};
2130 
2131   const char *LocalName[3] = {"__gen_ocl_get_local_id0",
2132                               "__gen_ocl_get_local_id1",
2133                               "__gen_ocl_get_local_id2"};
2134   IntegerType *SizeT =
2135       SizeTypeIs64bit ? Builder.getInt64Ty() : Builder.getInt32Ty();
2136 
2137   auto createFunc = [this](const char *Name, __isl_take isl_id *Id,
2138                            IntegerType *SizeT) mutable {
2139     Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2140     Function *FN = M->getFunction(Name);
2141 
2142     // If FN is not available, declare it.
2143     if (!FN) {
2144       GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
2145       std::vector<Type *> Args;
2146       FunctionType *Ty = FunctionType::get(SizeT, Args, false);
2147       FN = Function::Create(Ty, Linkage, Name, M);
2148       FN->setCallingConv(CallingConv::SPIR_FUNC);
2149     }
2150 
2151     Value *Val = Builder.CreateCall(FN, {});
2152     if (SizeT == Builder.getInt32Ty())
2153       Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
2154     IDToValue[Id] = Val;
2155     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2156   };
2157 
2158   for (int i = 0; i < Kernel->n_grid; ++i)
2159     createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i), SizeT);
2160 
2161   for (int i = 0; i < Kernel->n_block; ++i)
2162     createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i), SizeT);
2163 }
2164 
2165 void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
2166   auto Arg = FN->arg_begin();
2167   for (long i = 0; i < Kernel->n_array; i++) {
2168     if (!ppcg_kernel_requires_array_argument(Kernel, i))
2169       continue;
2170 
2171     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2172     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2173     isl_id_free(Id);
2174 
2175     if (SAI->getNumberOfDimensions() > 0) {
2176       Arg++;
2177       continue;
2178     }
2179 
2180     Value *Val = &*Arg;
2181 
2182     if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
2183       Type *TypePtr = SAI->getElementType()->getPointerTo();
2184       Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
2185       Val = Builder.CreateLoad(SAI->getElementType(), TypedArgPtr);
2186     }
2187 
2188     Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
2189     Builder.CreateStore(Val, Alloca);
2190 
2191     Arg++;
2192   }
2193 }
2194 
2195 void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
2196   auto *FN = Builder.GetInsertBlock()->getParent();
2197   auto Arg = FN->arg_begin();
2198 
2199   bool StoredScalar = false;
2200   for (long i = 0; i < Kernel->n_array; i++) {
2201     if (!ppcg_kernel_requires_array_argument(Kernel, i))
2202       continue;
2203 
2204     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2205     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2206     isl_id_free(Id);
2207 
2208     if (SAI->getNumberOfDimensions() > 0) {
2209       Arg++;
2210       continue;
2211     }
2212 
2213     if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
2214       Arg++;
2215       continue;
2216     }
2217 
2218     Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
2219     Value *ArgPtr = &*Arg;
2220     Type *TypePtr = SAI->getElementType()->getPointerTo();
2221     Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
2222     Value *Val = Builder.CreateLoad(SAI->getElementType(), Alloca);
2223     Builder.CreateStore(Val, TypedArgPtr);
2224     StoredScalar = true;
2225 
2226     Arg++;
2227   }
2228 
2229   if (StoredScalar) {
2230     /// In case more than one thread contains scalar stores, the generated
2231     /// code might be incorrect, if we only store at the end of the kernel.
2232     /// To support this case we need to store these scalars back at each
2233     /// memory store or at least before each kernel barrier.
2234     if (Kernel->n_block != 0 || Kernel->n_grid != 0) {
2235       BuildSuccessful = 0;
2236       LLVM_DEBUG(
2237           dbgs() << getUniqueScopName(&S)
2238                  << " has a store to a scalar value that"
2239                     " would be undefined to run in parallel. Bailing out.\n";);
2240     }
2241   }
2242 }
2243 
2244 void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
2245   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2246 
2247   for (int i = 0; i < Kernel->n_var; ++i) {
2248     struct ppcg_kernel_var &Var = Kernel->var[i];
2249     isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
2250     Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();
2251 
2252     Type *ArrayTy = EleTy;
2253     SmallVector<const SCEV *, 4> Sizes;
2254 
2255     Sizes.push_back(nullptr);
2256     for (unsigned int j = 1; j < Var.array->n_index; ++j) {
2257       isl_val *Val = isl_vec_get_element_val(Var.size, j);
2258       long Bound = isl_val_get_num_si(Val);
2259       isl_val_free(Val);
2260       Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
2261     }
2262 
2263     for (int j = Var.array->n_index - 1; j >= 0; --j) {
2264       isl_val *Val = isl_vec_get_element_val(Var.size, j);
2265       long Bound = isl_val_get_num_si(Val);
2266       isl_val_free(Val);
2267       ArrayTy = ArrayType::get(ArrayTy, Bound);
2268     }
2269 
2270     const ScopArrayInfo *SAI;
2271     Value *Allocation;
2272     if (Var.type == ppcg_access_shared) {
2273       auto GlobalVar = new GlobalVariable(
2274           *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
2275           nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
2276       GlobalVar->setAlignment(llvm::Align(EleTy->getPrimitiveSizeInBits() / 8));
2277       GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));
2278 
2279       Allocation = GlobalVar;
2280     } else if (Var.type == ppcg_access_private) {
2281       Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
2282     } else {
2283       llvm_unreachable("unknown variable type");
2284     }
2285     SAI =
2286         S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
2287     Id = isl_id_alloc(S.getIslCtx().get(), Var.name, nullptr);
2288     IDToValue[Id] = Allocation;
2289     LocalArrays.push_back(Allocation);
2290     KernelIds.push_back(Id);
2291     IDToSAI[Id] = SAI;
2292   }
2293 }
2294 
2295 void GPUNodeBuilder::createKernelFunction(
2296     ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
2297     SetVector<Function *> &SubtreeFunctions) {
2298   std::string Identifier = getKernelFuncName(Kernel->id);
2299   GPUModule.reset(new Module(Identifier, Builder.getContext()));
2300 
2301   switch (Arch) {
2302   case GPUArch::NVPTX64:
2303     if (Runtime == GPURuntime::CUDA)
2304       GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
2305     else if (Runtime == GPURuntime::OpenCL)
2306       GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
2307     GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
2308     break;
2309   case GPUArch::SPIR32:
2310     GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
2311     GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
2312     break;
2313   case GPUArch::SPIR64:
2314     GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
2315     GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
2316     break;
2317   }
2318 
2319   Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);
2320 
2321   BasicBlock *PrevBlock = Builder.GetInsertBlock();
2322   auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);
2323 
2324   DT.addNewBlock(EntryBlock, PrevBlock);
2325 
2326   Builder.SetInsertPoint(EntryBlock);
2327   Builder.CreateRetVoid();
2328   Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());
2329 
2330   ScopDetection::markFunctionAsInvalid(FN);
2331 
2332   prepareKernelArguments(Kernel, FN);
2333   createKernelVariables(Kernel, FN);
2334 
2335   switch (Arch) {
2336   case GPUArch::NVPTX64:
2337     insertKernelIntrinsics(Kernel);
2338     break;
2339   case GPUArch::SPIR32:
2340     insertKernelCallsSPIR(Kernel, false);
2341     break;
2342   case GPUArch::SPIR64:
2343     insertKernelCallsSPIR(Kernel, true);
2344     break;
2345   }
2346 }
2347 
2348 std::string GPUNodeBuilder::createKernelASM() {
2349   llvm::Triple GPUTriple;
2350 
2351   switch (Arch) {
2352   case GPUArch::NVPTX64:
2353     switch (Runtime) {
2354     case GPURuntime::CUDA:
2355       GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
2356       break;
2357     case GPURuntime::OpenCL:
2358       GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
2359       break;
2360     }
2361     break;
2362   case GPUArch::SPIR64:
2363   case GPUArch::SPIR32:
2364     std::string SPIRAssembly;
2365     raw_string_ostream IROstream(SPIRAssembly);
2366     IROstream << *GPUModule;
2367     IROstream.flush();
2368     return SPIRAssembly;
2369   }
2370 
2371   std::string ErrMsg;
2372   auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);
2373 
2374   if (!GPUTarget) {
2375     errs() << ErrMsg << "\n";
2376     return "";
2377   }
2378 
2379   TargetOptions Options;
2380   Options.UnsafeFPMath = FastMath;
2381 
2382   std::string subtarget;
2383 
2384   switch (Arch) {
2385   case GPUArch::NVPTX64:
2386     subtarget = CudaVersion;
2387     break;
2388   case GPUArch::SPIR32:
2389   case GPUArch::SPIR64:
2390     llvm_unreachable("No subtarget for SPIR architecture");
2391   }
2392 
2393   std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
2394       GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));
2395 
2396   SmallString<0> ASMString;
2397   raw_svector_ostream ASMStream(ASMString);
2398   llvm::legacy::PassManager PM;
2399 
2400   PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));
2401 
2402   if (TargetM->addPassesToEmitFile(PM, ASMStream, nullptr, CGFT_AssemblyFile,
2403                                    true /* verify */)) {
2404     errs() << "The target does not support generation of this file type!\n";
2405     return "";
2406   }
2407 
2408   PM.run(*GPUModule);
2409 
2410   return ASMStream.str().str();
2411 }
2412 
2413 bool GPUNodeBuilder::requiresCUDALibDevice() {
2414   bool RequiresLibDevice = false;
2415   for (Function &F : GPUModule->functions()) {
2416     if (!F.isDeclaration())
2417       continue;
2418 
2419     const std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(F.getName());
2420     if (CUDALibDeviceFunc.length() != 0) {
2421       // We need to handle the case where a module looks like this:
2422       // @expf(..)
2423       // @llvm.exp.f64(..)
2424       // Both of these functions would be renamed to `__nv_expf`.
2425       //
2426       // So, we must first check for the existence of the libdevice function.
2427       // If this exists, we replace our current function with it.
2428       //
2429       // If it does not exist, we rename the current function to the
2430       // libdevice functiono name.
2431       if (Function *Replacement = F.getParent()->getFunction(CUDALibDeviceFunc))
2432         F.replaceAllUsesWith(Replacement);
2433       else
2434         F.setName(CUDALibDeviceFunc);
2435       RequiresLibDevice = true;
2436     }
2437   }
2438 
2439   return RequiresLibDevice;
2440 }
2441 
2442 void GPUNodeBuilder::addCUDALibDevice() {
2443   if (Arch != GPUArch::NVPTX64)
2444     return;
2445 
2446   if (requiresCUDALibDevice()) {
2447     SMDiagnostic Error;
2448 
2449     errs() << CUDALibDevice << "\n";
2450     auto LibDeviceModule =
2451         parseIRFile(CUDALibDevice, Error, GPUModule->getContext());
2452 
2453     if (!LibDeviceModule) {
2454       BuildSuccessful = false;
2455       report_fatal_error("Could not find or load libdevice. Skipping GPU "
2456                          "kernel generation. Please set -polly-acc-libdevice "
2457                          "accordingly.\n");
2458       return;
2459     }
2460 
2461     Linker L(*GPUModule);
2462 
2463     // Set an nvptx64 target triple to avoid linker warnings. The original
2464     // triple of the libdevice files are nvptx-unknown-unknown.
2465     LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
2466     L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
2467   }
2468 }
2469 
2470 std::string GPUNodeBuilder::finalizeKernelFunction() {
2471 
2472   if (verifyModule(*GPUModule)) {
2473     LLVM_DEBUG(dbgs() << "verifyModule failed on module:\n";
2474                GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
2475     LLVM_DEBUG(dbgs() << "verifyModule Error:\n";
2476                verifyModule(*GPUModule, &dbgs()););
2477 
2478     if (FailOnVerifyModuleFailure)
2479       llvm_unreachable("VerifyModule failed.");
2480 
2481     BuildSuccessful = false;
2482     return "";
2483   }
2484 
2485   addCUDALibDevice();
2486 
2487   if (DumpKernelIR)
2488     outs() << *GPUModule << "\n";
2489 
2490   if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
2491     // Optimize module.
2492     llvm::legacy::PassManager OptPasses;
2493     PassManagerBuilder PassBuilder;
2494     PassBuilder.OptLevel = 3;
2495     PassBuilder.SizeLevel = 0;
2496     PassBuilder.populateModulePassManager(OptPasses);
2497     OptPasses.run(*GPUModule);
2498   }
2499 
2500   std::string Assembly = createKernelASM();
2501 
2502   if (DumpKernelASM)
2503     outs() << Assembly << "\n";
2504 
2505   GPUModule.release();
2506   KernelIDs.clear();
2507 
2508   return Assembly;
2509 }
2510 /// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`
2511 /// @param PwAffs The list of piecewise affine functions to create an
2512 ///               `isl_pw_aff_list` from. We expect an rvalue ref because
2513 ///               all the isl_pw_aff are used up by this function.
2514 ///
2515 /// @returns  The `isl_pw_aff_list`.
2516 __isl_give isl_pw_aff_list *
2517 createPwAffList(isl_ctx *Context,
2518                 const std::vector<__isl_take isl_pw_aff *> &&PwAffs) {
2519   isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size());
2520 
2521   for (unsigned i = 0; i < PwAffs.size(); i++) {
2522     List = isl_pw_aff_list_insert(List, i, PwAffs[i]);
2523   }
2524   return List;
2525 }
2526 
2527 /// Align all the `PwAffs` such that they have the same parameter dimensions.
2528 ///
2529 /// We loop over all `pw_aff` and align all of their spaces together to
2530 /// create a common space for all the `pw_aff`. This common space is the
2531 /// `AlignSpace`. We then align all the `pw_aff` to this space. We start
2532 /// with the given `SeedSpace`.
2533 /// @param PwAffs    The list of piecewise affine functions we want to align.
2534 ///                  This is an rvalue reference because the entire vector is
2535 ///                  used up by the end of the operation.
2536 /// @param SeedSpace The space to start the alignment process with.
2537 /// @returns         A std::pair, whose first element is the aligned space,
2538 ///                  whose second element is the vector of aligned piecewise
2539 ///                  affines.
2540 static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>>
2541 alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs,
2542             __isl_take isl_space *SeedSpace) {
2543   assert(SeedSpace && "Invalid seed space given.");
2544 
2545   isl_space *AlignSpace = SeedSpace;
2546   for (isl_pw_aff *PwAff : PwAffs) {
2547     isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff);
2548     AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace);
2549   }
2550   std::vector<isl_pw_aff *> AdjustedPwAffs;
2551 
2552   for (unsigned i = 0; i < PwAffs.size(); i++) {
2553     isl_pw_aff *Adjusted = PwAffs[i];
2554     assert(Adjusted && "Invalid pw_aff given.");
2555     Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace));
2556     AdjustedPwAffs.push_back(Adjusted);
2557   }
2558   return std::make_pair(AlignSpace, AdjustedPwAffs);
2559 }
2560 
2561 namespace {
2562 class PPCGCodeGeneration : public ScopPass {
2563 public:
2564   static char ID;
2565 
2566   GPURuntime Runtime = GPURuntime::CUDA;
2567 
2568   GPUArch Architecture = GPUArch::NVPTX64;
2569 
2570   /// The scop that is currently processed.
2571   Scop *S;
2572 
2573   LoopInfo *LI;
2574   DominatorTree *DT;
2575   ScalarEvolution *SE;
2576   const DataLayout *DL;
2577   RegionInfo *RI;
2578 
2579   PPCGCodeGeneration() : ScopPass(ID) {
2580     // Apply defaults.
2581     Runtime = GPURuntimeChoice;
2582     Architecture = GPUArchChoice;
2583   }
2584 
2585   /// Construct compilation options for PPCG.
2586   ///
2587   /// @returns The compilation options.
2588   ppcg_options *createPPCGOptions() {
2589     auto DebugOptions =
2590         (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
2591     auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));
2592 
2593     DebugOptions->dump_schedule_constraints = false;
2594     DebugOptions->dump_schedule = false;
2595     DebugOptions->dump_final_schedule = false;
2596     DebugOptions->dump_sizes = false;
2597     DebugOptions->verbose = false;
2598 
2599     Options->debug = DebugOptions;
2600 
2601     Options->group_chains = false;
2602     Options->reschedule = true;
2603     Options->scale_tile_loops = false;
2604     Options->wrap = false;
2605 
2606     Options->non_negative_parameters = false;
2607     Options->ctx = nullptr;
2608     Options->sizes = nullptr;
2609 
2610     Options->tile = true;
2611     Options->tile_size = 32;
2612 
2613     Options->isolate_full_tiles = false;
2614 
2615     Options->use_private_memory = PrivateMemory;
2616     Options->use_shared_memory = SharedMemory;
2617     Options->max_shared_memory = 48 * 1024;
2618 
2619     Options->target = PPCG_TARGET_CUDA;
2620     Options->openmp = false;
2621     Options->linearize_device_arrays = true;
2622     Options->allow_gnu_extensions = false;
2623 
2624     Options->unroll_copy_shared = false;
2625     Options->unroll_gpu_tile = false;
2626     Options->live_range_reordering = true;
2627 
2628     Options->live_range_reordering = true;
2629     Options->hybrid = false;
2630     Options->opencl_compiler_options = nullptr;
2631     Options->opencl_use_gpu = false;
2632     Options->opencl_n_include_file = 0;
2633     Options->opencl_include_files = nullptr;
2634     Options->opencl_print_kernel_types = false;
2635     Options->opencl_embed_kernel_code = false;
2636 
2637     Options->save_schedule_file = nullptr;
2638     Options->load_schedule_file = nullptr;
2639 
2640     return Options;
2641   }
2642 
2643   /// Get a tagged access relation containing all accesses of type @p AccessTy.
2644   ///
2645   /// Instead of a normal access of the form:
2646   ///
2647   ///   Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
2648   ///
2649   /// a tagged access has the form
2650   ///
2651   ///   [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
2652   ///
2653   /// where 'id' is an additional space that references the memory access that
2654   /// triggered the access.
2655   ///
2656   /// @param AccessTy The type of the memory accesses to collect.
2657   ///
2658   /// @return The relation describing all tagged memory accesses.
2659   isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
2660     isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace().release());
2661 
2662     for (auto &Stmt : *S)
2663       for (auto &Acc : Stmt)
2664         if (Acc->getType() == AccessTy) {
2665           isl_map *Relation = Acc->getAccessRelation().release();
2666           Relation =
2667               isl_map_intersect_domain(Relation, Stmt.getDomain().release());
2668 
2669           isl_space *Space = isl_map_get_space(Relation);
2670           Space = isl_space_range(Space);
2671           Space = isl_space_from_range(Space);
2672           Space =
2673               isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
2674           isl_map *Universe = isl_map_universe(Space);
2675           Relation = isl_map_domain_product(Relation, Universe);
2676           Accesses = isl_union_map_add_map(Accesses, Relation);
2677         }
2678 
2679     return Accesses;
2680   }
2681 
2682   /// Get the set of all read accesses, tagged with the access id.
2683   ///
2684   /// @see getTaggedAccesses
2685   isl_union_map *getTaggedReads() {
2686     return getTaggedAccesses(MemoryAccess::READ);
2687   }
2688 
2689   /// Get the set of all may (and must) accesses, tagged with the access id.
2690   ///
2691   /// @see getTaggedAccesses
2692   isl_union_map *getTaggedMayWrites() {
2693     return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
2694                                getTaggedAccesses(MemoryAccess::MUST_WRITE));
2695   }
2696 
2697   /// Get the set of all must accesses, tagged with the access id.
2698   ///
2699   /// @see getTaggedAccesses
2700   isl_union_map *getTaggedMustWrites() {
2701     return getTaggedAccesses(MemoryAccess::MUST_WRITE);
2702   }
2703 
2704   /// Collect parameter and array names as isl_ids.
2705   ///
2706   /// To reason about the different parameters and arrays used, ppcg requires
2707   /// a list of all isl_ids in use. As PPCG traditionally performs
2708   /// source-to-source compilation each of these isl_ids is mapped to the
2709   /// expression that represents it. As we do not have a corresponding
2710   /// expression in Polly, we just map each id to a 'zero' expression to match
2711   /// the data format that ppcg expects.
2712   ///
2713   /// @returns Retun a map from collected ids to 'zero' ast expressions.
2714   __isl_give isl_id_to_ast_expr *getNames() {
2715     auto *Names = isl_id_to_ast_expr_alloc(
2716         S->getIslCtx().get(),
2717         S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
2718     auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx().get()));
2719 
2720     for (const SCEV *P : S->parameters()) {
2721       isl_id *Id = S->getIdForParam(P).release();
2722       Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
2723     }
2724 
2725     for (auto &Array : S->arrays()) {
2726       auto Id = Array->getBasePtrId().release();
2727       Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
2728     }
2729 
2730     isl_ast_expr_free(Zero);
2731 
2732     return Names;
2733   }
2734 
2735   /// Create a new PPCG scop from the current scop.
2736   ///
2737   /// The PPCG scop is initialized with data from the current polly::Scop. From
2738   /// this initial data, the data-dependences in the PPCG scop are initialized.
2739   /// We do not use Polly's dependence analysis for now, to ensure we match
2740   /// the PPCG default behaviour more closely.
2741   ///
2742   /// @returns A new ppcg scop.
2743   ppcg_scop *createPPCGScop() {
2744     MustKillsInfo KillsInfo = computeMustKillsInfo(*S);
2745 
2746     auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));
2747 
2748     PPCGScop->options = createPPCGOptions();
2749     // enable live range reordering
2750     PPCGScop->options->live_range_reordering = 1;
2751 
2752     PPCGScop->start = 0;
2753     PPCGScop->end = 0;
2754 
2755     PPCGScop->context = S->getContext().release();
2756     PPCGScop->domain = S->getDomains().release();
2757     // TODO: investigate this further. PPCG calls collect_call_domains.
2758     PPCGScop->call = isl_union_set_from_set(S->getContext().release());
2759     PPCGScop->tagged_reads = getTaggedReads();
2760     PPCGScop->reads = S->getReads().release();
2761     PPCGScop->live_in = nullptr;
2762     PPCGScop->tagged_may_writes = getTaggedMayWrites();
2763     PPCGScop->may_writes = S->getWrites().release();
2764     PPCGScop->tagged_must_writes = getTaggedMustWrites();
2765     PPCGScop->must_writes = S->getMustWrites().release();
2766     PPCGScop->live_out = nullptr;
2767     PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.release();
2768     PPCGScop->must_kills = KillsInfo.MustKills.release();
2769 
2770     PPCGScop->tagger = nullptr;
2771     PPCGScop->independence =
2772         isl_union_map_empty(isl_set_get_space(PPCGScop->context));
2773     PPCGScop->dep_flow = nullptr;
2774     PPCGScop->tagged_dep_flow = nullptr;
2775     PPCGScop->dep_false = nullptr;
2776     PPCGScop->dep_forced = nullptr;
2777     PPCGScop->dep_order = nullptr;
2778     PPCGScop->tagged_dep_order = nullptr;
2779 
2780     PPCGScop->schedule = S->getScheduleTree().release();
2781     // If we have something non-trivial to kill, add it to the schedule
2782     if (KillsInfo.KillsSchedule.get())
2783       PPCGScop->schedule = isl_schedule_sequence(
2784           PPCGScop->schedule, KillsInfo.KillsSchedule.release());
2785 
2786     PPCGScop->names = getNames();
2787     PPCGScop->pet = nullptr;
2788 
2789     compute_tagger(PPCGScop);
2790     compute_dependences(PPCGScop);
2791     eliminate_dead_code(PPCGScop);
2792 
2793     return PPCGScop;
2794   }
2795 
2796   /// Collect the array accesses in a statement.
2797   ///
2798   /// @param Stmt The statement for which to collect the accesses.
2799   ///
2800   /// @returns A list of array accesses.
2801   gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
2802     gpu_stmt_access *Accesses = nullptr;
2803 
2804     for (MemoryAccess *Acc : Stmt) {
2805       auto Access =
2806           isl_alloc_type(S->getIslCtx().get(), struct gpu_stmt_access);
2807       Access->read = Acc->isRead();
2808       Access->write = Acc->isWrite();
2809       Access->access = Acc->getAccessRelation().release();
2810       isl_space *Space = isl_map_get_space(Access->access);
2811       Space = isl_space_range(Space);
2812       Space = isl_space_from_range(Space);
2813       Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
2814       isl_map *Universe = isl_map_universe(Space);
2815       Access->tagged_access =
2816           isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
2817       Access->exact_write = !Acc->isMayWrite();
2818       Access->ref_id = Acc->getId().release();
2819       Access->next = Accesses;
2820       Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
2821       // TODO: Also mark one-element accesses to arrays as fixed-element.
2822       Access->fixed_element =
2823           Acc->isLatestScalarKind() ? isl_bool_true : isl_bool_false;
2824       Accesses = Access;
2825     }
2826 
2827     return Accesses;
2828   }
2829 
2830   /// Collect the list of GPU statements.
2831   ///
2832   /// Each statement has an id, a pointer to the underlying data structure,
2833   /// as well as a list with all memory accesses.
2834   ///
2835   /// TODO: Initialize the list of memory accesses.
2836   ///
2837   /// @returns A linked-list of statements.
2838   gpu_stmt *getStatements() {
2839     gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx().get(), struct gpu_stmt,
2840                                        std::distance(S->begin(), S->end()));
2841 
2842     int i = 0;
2843     for (auto &Stmt : *S) {
2844       gpu_stmt *GPUStmt = &Stmts[i];
2845 
2846       GPUStmt->id = Stmt.getDomainId().release();
2847 
2848       // We use the pet stmt pointer to keep track of the Polly statements.
2849       GPUStmt->stmt = (pet_stmt *)&Stmt;
2850       GPUStmt->accesses = getStmtAccesses(Stmt);
2851       i++;
2852     }
2853 
2854     return Stmts;
2855   }
2856 
2857   /// Derive the extent of an array.
2858   ///
2859   /// The extent of an array is the set of elements that are within the
2860   /// accessed array. For the inner dimensions, the extent constraints are
2861   /// 0 and the size of the corresponding array dimension. For the first
2862   /// (outermost) dimension, the extent constraints are the minimal and maximal
2863   /// subscript value for the first dimension.
2864   ///
2865   /// @param Array The array to derive the extent for.
2866   ///
2867   /// @returns An isl_set describing the extent of the array.
2868   isl::set getExtent(ScopArrayInfo *Array) {
2869     unsigned NumDims = Array->getNumberOfDimensions();
2870 
2871     if (Array->getNumberOfDimensions() == 0)
2872       return isl::set::universe(Array->getSpace());
2873 
2874     isl::union_map Accesses = S->getAccesses(Array);
2875     isl::union_set AccessUSet = Accesses.range();
2876     AccessUSet = AccessUSet.coalesce();
2877     AccessUSet = AccessUSet.detect_equalities();
2878     AccessUSet = AccessUSet.coalesce();
2879 
2880     if (AccessUSet.is_empty())
2881       return isl::set::empty(Array->getSpace());
2882 
2883     isl::set AccessSet = AccessUSet.extract_set(Array->getSpace());
2884 
2885     isl::local_space LS = isl::local_space(Array->getSpace());
2886 
2887     isl::pw_aff Val = isl::aff::var_on_domain(LS, isl::dim::set, 0);
2888     isl::pw_aff OuterMin = AccessSet.dim_min(0);
2889     isl::pw_aff OuterMax = AccessSet.dim_max(0);
2890     OuterMin = OuterMin.add_dims(isl::dim::in,
2891                                  unsignedFromIslSize(Val.dim(isl::dim::in)));
2892     OuterMax = OuterMax.add_dims(isl::dim::in,
2893                                  unsignedFromIslSize(Val.dim(isl::dim::in)));
2894     OuterMin = OuterMin.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2895     OuterMax = OuterMax.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2896 
2897     isl::set Extent = isl::set::universe(Array->getSpace());
2898 
2899     Extent = Extent.intersect(OuterMin.le_set(Val));
2900     Extent = Extent.intersect(OuterMax.ge_set(Val));
2901 
2902     for (unsigned i = 1; i < NumDims; ++i)
2903       Extent = Extent.lower_bound_si(isl::dim::set, i, 0);
2904 
2905     for (unsigned i = 0; i < NumDims; ++i) {
2906       isl::pw_aff PwAff = Array->getDimensionSizePw(i);
2907 
2908       // isl_pw_aff can be NULL for zero dimension. Only in the case of a
2909       // Fortran array will we have a legitimate dimension.
2910       if (PwAff.is_null()) {
2911         assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
2912         continue;
2913       }
2914 
2915       isl::pw_aff Val = isl::aff::var_on_domain(
2916           isl::local_space(Array->getSpace()), isl::dim::set, i);
2917       PwAff = PwAff.add_dims(isl::dim::in,
2918                              unsignedFromIslSize(Val.dim(isl::dim::in)));
2919       PwAff = PwAff.set_tuple_id(isl::dim::in, Val.get_tuple_id(isl::dim::in));
2920       isl::set Set = PwAff.gt_set(Val);
2921       Extent = Set.intersect(Extent);
2922     }
2923 
2924     return Extent;
2925   }
2926 
2927   /// Derive the bounds of an array.
2928   ///
2929   /// For the first dimension we derive the bound of the array from the extent
2930   /// of this dimension. For inner dimensions we obtain their size directly from
2931   /// ScopArrayInfo.
2932   ///
2933   /// @param PPCGArray The array to compute bounds for.
2934   /// @param Array The polly array from which to take the information.
2935   void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
2936     std::vector<isl_pw_aff *> Bounds;
2937 
2938     if (PPCGArray.n_index > 0) {
2939       if (isl_set_is_empty(PPCGArray.extent)) {
2940         isl_set *Dom = isl_set_copy(PPCGArray.extent);
2941         isl_local_space *LS = isl_local_space_from_space(
2942             isl_space_params(isl_set_get_space(Dom)));
2943         isl_set_free(Dom);
2944         isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
2945         Bounds.push_back(Zero);
2946       } else {
2947         isl_set *Dom = isl_set_copy(PPCGArray.extent);
2948         Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
2949         isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
2950         isl_set_free(Dom);
2951         Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
2952         isl_local_space *LS =
2953             isl_local_space_from_space(isl_set_get_space(Dom));
2954         isl_aff *One = isl_aff_zero_on_domain(LS);
2955         One = isl_aff_add_constant_si(One, 1);
2956         Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
2957         Bound = isl_pw_aff_gist(Bound, S->getContext().release());
2958         Bounds.push_back(Bound);
2959       }
2960     }
2961 
2962     for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
2963       isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
2964       auto LS = isl_pw_aff_get_domain_space(Bound);
2965       auto Aff = isl_multi_aff_zero(LS);
2966 
2967       // We need types to work out, which is why we perform this weird dance
2968       // with `Aff` and `Bound`. Consider this example:
2969 
2970       // LS: [p] -> { [] }
2971       // Zero: [p] -> { [] } | Implicitly, is [p] -> { ~ -> [] }.
2972       // This `~` is used to denote a "null space" (which is different from
2973       // a *zero dimensional* space), which is something that ISL does not
2974       // show you when pretty printing.
2975 
2976       // Bound: [p] -> { [] -> [(10p)] } | Here, the [] is a *zero dimensional*
2977       // space, not a "null space" which does not exist at all.
2978 
2979       // When we pullback (precompose) `Bound` with `Zero`, we get:
2980       // Bound . Zero =
2981       //     ([p] -> { [] -> [(10p)] }) . ([p] -> {~ -> [] }) =
2982       //     [p] -> { ~ -> [(10p)] } =
2983       //     [p] -> [(10p)] (as ISL pretty prints it)
2984       // Bound Pullback: [p] -> { [(10p)] }
2985 
2986       // We want this kind of an expression for Bound, without a
2987       // zero dimensional input, but with a "null space" input for the types
2988       // to work out later on, as far as I (Siddharth Bhat) understand.
2989       // I was unable to find a reference to this in the ISL manual.
2990       // References: Tobias Grosser.
2991 
2992       Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
2993       Bounds.push_back(Bound);
2994     }
2995 
2996     /// To construct a `isl_multi_pw_aff`, we need all the indivisual `pw_aff`
2997     /// to have the same parameter dimensions. So, we need to align them to an
2998     /// appropriate space.
2999     /// Scop::Context is _not_ an appropriate space, because when we have
3000     /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not
3001     /// contain all parameter dimensions.
3002     /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together.
3003     isl_space *SeedAlignSpace = S->getParamSpace().release();
3004     SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1);
3005 
3006     isl_space *AlignSpace = nullptr;
3007     std::vector<isl_pw_aff *> AlignedBounds;
3008     std::tie(AlignSpace, AlignedBounds) =
3009         alignPwAffs(std::move(Bounds), SeedAlignSpace);
3010 
3011     assert(AlignSpace && "alignPwAffs did not initialise AlignSpace");
3012 
3013     isl_pw_aff_list *BoundsList =
3014         createPwAffList(S->getIslCtx().get(), std::move(AlignedBounds));
3015 
3016     isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);
3017     BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace);
3018 
3019     assert(BoundsSpace && "Unable to access space of array.");
3020     assert(BoundsList && "Unable to access list of bounds.");
3021 
3022     PPCGArray.bound =
3023         isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
3024     assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
3025   }
3026 
3027   /// Create the arrays for @p PPCGProg.
3028   ///
3029   /// @param PPCGProg The program to compute the arrays for.
3030   void createArrays(gpu_prog *PPCGProg,
3031                     const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
3032     int i = 0;
3033     for (auto &Array : ValidSAIs) {
3034       std::string TypeName;
3035       raw_string_ostream OS(TypeName);
3036 
3037       OS << *Array->getElementType();
3038       TypeName = OS.str();
3039 
3040       gpu_array_info &PPCGArray = PPCGProg->array[i];
3041 
3042       PPCGArray.space = Array->getSpace().release();
3043       PPCGArray.type = strdup(TypeName.c_str());
3044       PPCGArray.size = DL->getTypeAllocSize(Array->getElementType());
3045       PPCGArray.name = strdup(Array->getName().c_str());
3046       PPCGArray.extent = nullptr;
3047       PPCGArray.n_index = Array->getNumberOfDimensions();
3048       PPCGArray.extent = getExtent(Array).release();
3049       PPCGArray.n_ref = 0;
3050       PPCGArray.refs = nullptr;
3051       PPCGArray.accessed = true;
3052       PPCGArray.read_only_scalar =
3053           Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
3054       PPCGArray.has_compound_element = false;
3055       PPCGArray.local = false;
3056       PPCGArray.declare_local = false;
3057       PPCGArray.global = false;
3058       PPCGArray.linearize = false;
3059       PPCGArray.dep_order = nullptr;
3060       PPCGArray.user = Array;
3061 
3062       PPCGArray.bound = nullptr;
3063       setArrayBounds(PPCGArray, Array);
3064       i++;
3065 
3066       collect_references(PPCGProg, &PPCGArray);
3067       PPCGArray.only_fixed_element = only_fixed_element_accessed(&PPCGArray);
3068     }
3069   }
3070 
3071   /// Create an identity map between the arrays in the scop.
3072   ///
3073   /// @returns An identity map between the arrays in the scop.
3074   isl_union_map *getArrayIdentity() {
3075     isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release());
3076 
3077     for (auto &Array : S->arrays()) {
3078       isl_space *Space = Array->getSpace().release();
3079       Space = isl_space_map_from_set(Space);
3080       isl_map *Identity = isl_map_identity(Space);
3081       Maps = isl_union_map_add_map(Maps, Identity);
3082     }
3083 
3084     return Maps;
3085   }
3086 
3087   /// Create a default-initialized PPCG GPU program.
3088   ///
3089   /// @returns A new gpu program description.
3090   gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {
3091 
3092     if (!PPCGScop)
3093       return nullptr;
3094 
3095     auto PPCGProg = isl_calloc_type(S->getIslCtx().get(), struct gpu_prog);
3096 
3097     PPCGProg->ctx = S->getIslCtx().get();
3098     PPCGProg->scop = PPCGScop;
3099     PPCGProg->context = isl_set_copy(PPCGScop->context);
3100     PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
3101     PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
3102     PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
3103     PPCGProg->tagged_must_kill =
3104         isl_union_map_copy(PPCGScop->tagged_must_kills);
3105     PPCGProg->to_inner = getArrayIdentity();
3106     PPCGProg->to_outer = getArrayIdentity();
3107     // TODO: verify that this assignment is correct.
3108     PPCGProg->any_to_outer = nullptr;
3109     PPCGProg->n_stmts = std::distance(S->begin(), S->end());
3110     PPCGProg->stmts = getStatements();
3111 
3112     // Only consider arrays that have a non-empty extent.
3113     // Otherwise, this will cause us to consider the following kinds of
3114     // empty arrays:
3115     //     1. Invariant loads that are represented by SAI objects.
3116     //     2. Arrays with statically known zero size.
3117     auto ValidSAIsRange =
3118         make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
3119           return !getExtent(SAI).is_empty();
3120         });
3121     SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
3122                                               ValidSAIsRange.end());
3123 
3124     PPCGProg->n_array =
3125         ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end());
3126     PPCGProg->array = isl_calloc_array(
3127         S->getIslCtx().get(), struct gpu_array_info, PPCGProg->n_array);
3128 
3129     createArrays(PPCGProg, ValidSAIs);
3130 
3131     PPCGProg->array_order = nullptr;
3132     collect_order_dependences(PPCGProg);
3133 
3134     PPCGProg->may_persist = compute_may_persist(PPCGProg);
3135     return PPCGProg;
3136   }
3137 
3138   struct PrintGPUUserData {
3139     struct cuda_info *CudaInfo;
3140     struct gpu_prog *PPCGProg;
3141     std::vector<ppcg_kernel *> Kernels;
3142   };
3143 
3144   /// Print a user statement node in the host code.
3145   ///
3146   /// We use ppcg's printing facilities to print the actual statement and
3147   /// additionally build up a list of all kernels that are encountered in the
3148   /// host ast.
3149   ///
3150   /// @param P The printer to print to
3151   /// @param Options The printing options to use
3152   /// @param Node The node to print
3153   /// @param User A user pointer to carry additional data. This pointer is
3154   ///             expected to be of type PrintGPUUserData.
3155   ///
3156   /// @returns A printer to which the output has been printed.
3157   static __isl_give isl_printer *
3158   printHostUser(__isl_take isl_printer *P,
3159                 __isl_take isl_ast_print_options *Options,
3160                 __isl_take isl_ast_node *Node, void *User) {
3161     auto Data = (struct PrintGPUUserData *)User;
3162     auto Id = isl_ast_node_get_annotation(Node);
3163 
3164     if (Id) {
3165       bool IsUser = !strcmp(isl_id_get_name(Id), "user");
3166 
3167       // If this is a user statement, format it ourselves as ppcg would
3168       // otherwise try to call pet functionality that is not available in
3169       // Polly.
3170       if (IsUser) {
3171         P = isl_printer_start_line(P);
3172         P = isl_printer_print_ast_node(P, Node);
3173         P = isl_printer_end_line(P);
3174         isl_id_free(Id);
3175         isl_ast_print_options_free(Options);
3176         return P;
3177       }
3178 
3179       auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
3180       isl_id_free(Id);
3181       Data->Kernels.push_back(Kernel);
3182     }
3183 
3184     return print_host_user(P, Options, Node, User);
3185   }
3186 
3187   /// Print C code corresponding to the control flow in @p Kernel.
3188   ///
3189   /// @param Kernel The kernel to print
3190   void printKernel(ppcg_kernel *Kernel) {
3191     auto *P = isl_printer_to_str(S->getIslCtx().get());
3192     P = isl_printer_set_output_format(P, ISL_FORMAT_C);
3193     auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
3194     P = isl_ast_node_print(Kernel->tree, P, Options);
3195     char *String = isl_printer_get_str(P);
3196     outs() << String << "\n";
3197     free(String);
3198     isl_printer_free(P);
3199   }
3200 
3201   /// Print C code corresponding to the GPU code described by @p Tree.
3202   ///
3203   /// @param Tree An AST describing GPU code
3204   /// @param PPCGProg The PPCG program from which @Tree has been constructed.
3205   void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
3206     auto *P = isl_printer_to_str(S->getIslCtx().get());
3207     P = isl_printer_set_output_format(P, ISL_FORMAT_C);
3208 
3209     PrintGPUUserData Data;
3210     Data.PPCGProg = PPCGProg;
3211 
3212     auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
3213     Options =
3214         isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
3215     P = isl_ast_node_print(Tree, P, Options);
3216     char *String = isl_printer_get_str(P);
3217     outs() << "# host\n";
3218     outs() << String << "\n";
3219     free(String);
3220     isl_printer_free(P);
3221 
3222     for (auto Kernel : Data.Kernels) {
3223       outs() << "# kernel" << Kernel->id << "\n";
3224       printKernel(Kernel);
3225     }
3226   }
3227 
3228   // Generate a GPU program using PPCG.
3229   //
3230   // GPU mapping consists of multiple steps:
3231   //
3232   //  1) Compute new schedule for the program.
3233   //  2) Map schedule to GPU (TODO)
3234   //  3) Generate code for new schedule (TODO)
3235   //
3236   // We do not use here the Polly ScheduleOptimizer, as the schedule optimizer
3237   // is mostly CPU specific. Instead, we use PPCG's GPU code generation
3238   // strategy directly from this pass.
3239   gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {
3240 
3241     auto PPCGGen = isl_calloc_type(S->getIslCtx().get(), struct gpu_gen);
3242 
3243     PPCGGen->ctx = S->getIslCtx().get();
3244     PPCGGen->options = PPCGScop->options;
3245     PPCGGen->print = nullptr;
3246     PPCGGen->print_user = nullptr;
3247     PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
3248     PPCGGen->prog = PPCGProg;
3249     PPCGGen->tree = nullptr;
3250     PPCGGen->types.n = 0;
3251     PPCGGen->types.name = nullptr;
3252     PPCGGen->sizes = nullptr;
3253     PPCGGen->used_sizes = nullptr;
3254     PPCGGen->kernel_id = 0;
3255 
3256     // Set scheduling strategy to same strategy PPCG is using.
3257     isl_options_set_schedule_serialize_sccs(PPCGGen->ctx, false);
3258     isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
3259     isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
3260     isl_options_set_schedule_whole_component(PPCGGen->ctx, false);
3261 
3262     isl_schedule *Schedule = get_schedule(PPCGGen);
3263 
3264     int has_permutable = has_any_permutable_node(Schedule);
3265 
3266     Schedule =
3267         isl_schedule_align_params(Schedule, S->getFullParamSpace().release());
3268 
3269     if (!has_permutable || has_permutable < 0) {
3270       Schedule = isl_schedule_free(Schedule);
3271       LLVM_DEBUG(dbgs() << getUniqueScopName(S)
3272                         << " does not have permutable bands. Bailing out\n";);
3273     } else {
3274       const bool CreateTransferToFromDevice = !PollyManagedMemory;
3275       Schedule = map_to_device(PPCGGen, Schedule, CreateTransferToFromDevice);
3276       PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
3277     }
3278 
3279     if (DumpSchedule) {
3280       isl_printer *P = isl_printer_to_str(S->getIslCtx().get());
3281       P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
3282       P = isl_printer_print_str(P, "Schedule\n");
3283       P = isl_printer_print_str(P, "========\n");
3284       if (Schedule)
3285         P = isl_printer_print_schedule(P, Schedule);
3286       else
3287         P = isl_printer_print_str(P, "No schedule found\n");
3288 
3289       outs() << isl_printer_get_str(P) << "\n";
3290       isl_printer_free(P);
3291     }
3292 
3293     if (DumpCode) {
3294       outs() << "Code\n";
3295       outs() << "====\n";
3296       if (PPCGGen->tree)
3297         printGPUTree(PPCGGen->tree, PPCGProg);
3298       else
3299         outs() << "No code generated\n";
3300     }
3301 
3302     isl_schedule_free(Schedule);
3303 
3304     return PPCGGen;
3305   }
3306 
3307   /// Free gpu_gen structure.
3308   ///
3309   /// @param PPCGGen The ppcg_gen object to free.
3310   void freePPCGGen(gpu_gen *PPCGGen) {
3311     isl_ast_node_free(PPCGGen->tree);
3312     isl_union_map_free(PPCGGen->sizes);
3313     isl_union_map_free(PPCGGen->used_sizes);
3314     free(PPCGGen);
3315   }
3316 
3317   /// Free the options in the ppcg scop structure.
3318   ///
3319   /// ppcg is not freeing these options for us. To avoid leaks we do this
3320   /// ourselves.
3321   ///
3322   /// @param PPCGScop The scop referencing the options to free.
3323   void freeOptions(ppcg_scop *PPCGScop) {
3324     free(PPCGScop->options->debug);
3325     PPCGScop->options->debug = nullptr;
3326     free(PPCGScop->options);
3327     PPCGScop->options = nullptr;
3328   }
3329 
3330   /// Approximate the number of points in the set.
3331   ///
3332   /// This function returns an ast expression that overapproximates the number
3333   /// of points in an isl set through the rectangular hull surrounding this set.
3334   ///
3335   /// @param Set   The set to count.
3336   /// @param Build The isl ast build object to use for creating the ast
3337   ///              expression.
3338   ///
3339   /// @returns An approximation of the number of points in the set.
3340   __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
3341                                              __isl_keep isl_ast_build *Build) {
3342 
3343     isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
3344     auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));
3345 
3346     isl_space *Space = isl_set_get_space(Set);
3347     Space = isl_space_params(Space);
3348     auto *Univ = isl_set_universe(Space);
3349     isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);
3350 
3351     for (long i = 0, n = isl_set_dim(Set, isl_dim_set); i < n; i++) {
3352       isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
3353       isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
3354       isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
3355       DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
3356       auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
3357       Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
3358     }
3359 
3360     isl_set_free(Set);
3361     isl_pw_aff_free(OneAff);
3362 
3363     return Expr;
3364   }
3365 
3366   /// Approximate a number of dynamic instructions executed by a given
3367   /// statement.
3368   ///
3369   /// @param Stmt  The statement for which to compute the number of dynamic
3370   ///              instructions.
3371   /// @param Build The isl ast build object to use for creating the ast
3372   ///              expression.
3373   /// @returns An approximation of the number of dynamic instructions executed
3374   ///          by @p Stmt.
3375   __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
3376                                              __isl_keep isl_ast_build *Build) {
3377     auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);
3378 
3379     long InstCount = 0;
3380 
3381     if (Stmt.isBlockStmt()) {
3382       auto *BB = Stmt.getBasicBlock();
3383       InstCount = std::distance(BB->begin(), BB->end());
3384     } else {
3385       auto *R = Stmt.getRegion();
3386 
3387       for (auto *BB : R->blocks()) {
3388         InstCount += std::distance(BB->begin(), BB->end());
3389       }
3390     }
3391 
3392     isl_val *InstVal = isl_val_int_from_si(S->getIslCtx().get(), InstCount);
3393     auto *InstExpr = isl_ast_expr_from_val(InstVal);
3394     return isl_ast_expr_mul(InstExpr, Iterations);
3395   }
3396 
3397   /// Approximate dynamic instructions executed in scop.
3398   ///
3399   /// @param S     The scop for which to approximate dynamic instructions.
3400   /// @param Build The isl ast build object to use for creating the ast
3401   ///              expression.
3402   /// @returns An approximation of the number of dynamic instructions executed
3403   ///          in @p S.
3404   __isl_give isl_ast_expr *
3405   getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
3406     isl_ast_expr *Instructions;
3407 
3408     isl_val *Zero = isl_val_int_from_si(S.getIslCtx().get(), 0);
3409     Instructions = isl_ast_expr_from_val(Zero);
3410 
3411     for (ScopStmt &Stmt : S) {
3412       isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
3413       Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
3414     }
3415     return Instructions;
3416   }
3417 
3418   /// Create a check that ensures sufficient compute in scop.
3419   ///
3420   /// @param S     The scop for which to ensure sufficient compute.
3421   /// @param Build The isl ast build object to use for creating the ast
3422   ///              expression.
3423   /// @returns An expression that evaluates to TRUE in case of sufficient
3424   ///          compute and to FALSE, otherwise.
3425   __isl_give isl_ast_expr *
3426   createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
3427     auto Iterations = getNumberOfIterations(S, Build);
3428     auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx().get(), MinCompute);
3429     auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
3430     return isl_ast_expr_ge(Iterations, MinComputeExpr);
3431   }
3432 
3433   /// Check if the basic block contains a function we cannot codegen for GPU
3434   /// kernels.
3435   ///
3436   /// If this basic block does something with a `Function` other than calling
3437   /// a function that we support in a kernel, return true.
3438   bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
3439                                             bool AllowCUDALibDevice) {
3440     for (const Instruction &Inst : *BB) {
3441       const CallInst *Call = dyn_cast<CallInst>(&Inst);
3442       if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
3443                                           AllowCUDALibDevice))
3444         continue;
3445 
3446       for (Value *Op : Inst.operands())
3447         // Look for (<func-type>*) among operands of Inst
3448         if (auto PtrTy = dyn_cast<PointerType>(Op->getType())) {
3449           if (isa<FunctionType>(PtrTy->getPointerElementType())) {
3450             LLVM_DEBUG(dbgs()
3451                        << Inst << " has illegal use of function in kernel.\n");
3452             return true;
3453           }
3454         }
3455     }
3456     return false;
3457   }
3458 
3459   /// Return whether the Scop S uses functions in a way that we do not support.
3460   bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
3461     for (auto &Stmt : S) {
3462       if (Stmt.isBlockStmt()) {
3463         if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
3464                                                  AllowCUDALibDevice))
3465           return true;
3466       } else {
3467         assert(Stmt.isRegionStmt() &&
3468                "Stmt was neither block nor region statement");
3469         for (const BasicBlock *BB : Stmt.getRegion()->blocks())
3470           if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
3471             return true;
3472       }
3473     }
3474     return false;
3475   }
3476 
3477   /// Generate code for a given GPU AST described by @p Root.
3478   ///
3479   /// @param Root An isl_ast_node pointing to the root of the GPU AST.
3480   /// @param Prog The GPU Program to generate code for.
3481   void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
3482     ScopAnnotator Annotator;
3483     Annotator.buildAliasScopes(*S);
3484 
3485     Region *R = &S->getRegion();
3486 
3487     simplifyRegion(R, DT, LI, RI);
3488 
3489     BasicBlock *EnteringBB = R->getEnteringBlock();
3490 
3491     PollyIRBuilder Builder(EnteringBB->getContext(), ConstantFolder(),
3492                            IRInserter(Annotator));
3493     Builder.SetInsertPoint(EnteringBB->getTerminator());
3494 
3495     // Only build the run-time condition and parameters _after_ having
3496     // introduced the conditional branch. This is important as the conditional
3497     // branch will guard the original scop from new induction variables that
3498     // the SCEVExpander may introduce while code generating the parameters and
3499     // which may introduce scalar dependences that prevent us from correctly
3500     // code generating this scop.
3501     BBPair StartExitBlocks;
3502     BranchInst *CondBr = nullptr;
3503     std::tie(StartExitBlocks, CondBr) =
3504         executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
3505     BasicBlock *StartBlock = std::get<0>(StartExitBlocks);
3506 
3507     assert(CondBr && "CondBr not initialized by executeScopConditionally");
3508 
3509     GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
3510                                StartBlock, Prog, Runtime, Architecture);
3511 
3512     // TODO: Handle LICM
3513     auto SplitBlock = StartBlock->getSinglePredecessor();
3514     Builder.SetInsertPoint(SplitBlock->getTerminator());
3515 
3516     isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx().get());
3517     isl::ast_expr Condition =
3518         IslAst::buildRunCondition(*S, isl::manage_copy(Build));
3519     isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
3520     Condition =
3521         isl::manage(isl_ast_expr_and(Condition.release(), SufficientCompute));
3522     isl_ast_build_free(Build);
3523 
3524     // preload invariant loads. Note: This should happen before the RTC
3525     // because the RTC may depend on values that are invariant load hoisted.
3526     if (!NodeBuilder.preloadInvariantLoads()) {
3527       // Patch the introduced branch condition to ensure that we always execute
3528       // the original SCoP.
3529       auto *FalseI1 = Builder.getFalse();
3530       auto *SplitBBTerm = Builder.GetInsertBlock()->getTerminator();
3531       SplitBBTerm->setOperand(0, FalseI1);
3532 
3533       LLVM_DEBUG(dbgs() << "preloading invariant loads failed in function: " +
3534                                S->getFunction().getName() +
3535                                " | Scop Region: " + S->getNameStr());
3536       // adjust the dominator tree accordingly.
3537       auto *ExitingBlock = StartBlock->getUniqueSuccessor();
3538       assert(ExitingBlock);
3539       auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
3540       assert(MergeBlock);
3541       polly::markBlockUnreachable(*StartBlock, Builder);
3542       polly::markBlockUnreachable(*ExitingBlock, Builder);
3543       auto *ExitingBB = S->getExitingBlock();
3544       assert(ExitingBB);
3545 
3546       DT->changeImmediateDominator(MergeBlock, ExitingBB);
3547       DT->eraseNode(ExitingBlock);
3548       isl_ast_node_free(Root);
3549     } else {
3550 
3551       if (polly::PerfMonitoring) {
3552         PerfMonitor P(*S, EnteringBB->getParent()->getParent());
3553         P.initialize();
3554         P.insertRegionStart(SplitBlock->getTerminator());
3555 
3556         // TODO: actually think if this is the correct exiting block to place
3557         // the `end` performance marker. Invariant load hoisting changes
3558         // the CFG in a way that I do not precisely understand, so I
3559         // (Siddharth<[email protected]>) should come back to this and
3560         // think about which exiting block to use.
3561         auto *ExitingBlock = StartBlock->getUniqueSuccessor();
3562         assert(ExitingBlock);
3563         BasicBlock *MergeBlock = ExitingBlock->getUniqueSuccessor();
3564         P.insertRegionEnd(MergeBlock->getTerminator());
3565       }
3566 
3567       NodeBuilder.addParameters(S->getContext().release());
3568       Value *RTC = NodeBuilder.createRTC(Condition.release());
3569       Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);
3570 
3571       Builder.SetInsertPoint(&*StartBlock->begin());
3572 
3573       NodeBuilder.create(Root);
3574     }
3575 
3576     /// In case a sequential kernel has more surrounding loops as any parallel
3577     /// kernel, the SCoP is probably mostly sequential. Hence, there is no
3578     /// point in running it on a GPU.
3579     if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
3580       CondBr->setOperand(0, Builder.getFalse());
3581 
3582     if (!NodeBuilder.BuildSuccessful)
3583       CondBr->setOperand(0, Builder.getFalse());
3584   }
3585 
3586   bool runOnScop(Scop &CurrentScop) override {
3587     S = &CurrentScop;
3588     LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
3589     DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3590     SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3591     DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
3592     RI = &getAnalysis<RegionInfoPass>().getRegionInfo();
3593 
3594     LLVM_DEBUG(dbgs() << "PPCGCodeGen running on : " << getUniqueScopName(S)
3595                       << " | loop depth: " << S->getMaxLoopDepth() << "\n");
3596 
3597     // We currently do not support functions other than intrinsics inside
3598     // kernels, as code generation will need to offload function calls to the
3599     // kernel. This may lead to a kernel trying to call a function on the host.
3600     // This also allows us to prevent codegen from trying to take the
3601     // address of an intrinsic function to send to the kernel.
3602     if (containsInvalidKernelFunction(CurrentScop,
3603                                       Architecture == GPUArch::NVPTX64)) {
3604       LLVM_DEBUG(
3605           dbgs() << getUniqueScopName(S)
3606                  << " contains function which cannot be materialised in a GPU "
3607                     "kernel. Bailing out.\n";);
3608       return false;
3609     }
3610 
3611     auto PPCGScop = createPPCGScop();
3612     auto PPCGProg = createPPCGProg(PPCGScop);
3613     auto PPCGGen = generateGPU(PPCGScop, PPCGProg);
3614 
3615     if (PPCGGen->tree) {
3616       generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
3617       CurrentScop.markAsToBeSkipped();
3618     } else {
3619       LLVM_DEBUG(dbgs() << getUniqueScopName(S)
3620                         << " has empty PPCGGen->tree. Bailing out.\n");
3621     }
3622 
3623     freeOptions(PPCGScop);
3624     freePPCGGen(PPCGGen);
3625     gpu_prog_free(PPCGProg);
3626     ppcg_scop_free(PPCGScop);
3627 
3628     return true;
3629   }
3630 
3631   void printScop(raw_ostream &, Scop &) const override {}
3632 
3633   void getAnalysisUsage(AnalysisUsage &AU) const override {
3634     ScopPass::getAnalysisUsage(AU);
3635 
3636     AU.addRequired<DominatorTreeWrapperPass>();
3637     AU.addRequired<RegionInfoPass>();
3638     AU.addRequired<ScalarEvolutionWrapperPass>();
3639     AU.addRequired<ScopDetectionWrapperPass>();
3640     AU.addRequired<ScopInfoRegionPass>();
3641     AU.addRequired<LoopInfoWrapperPass>();
3642 
3643     // FIXME: We do not yet add regions for the newly generated code to the
3644     //        region tree.
3645   }
3646 };
3647 } // namespace
3648 
3649 char PPCGCodeGeneration::ID = 1;
3650 
3651 Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
3652   PPCGCodeGeneration *generator = new PPCGCodeGeneration();
3653   generator->Runtime = Runtime;
3654   generator->Architecture = Arch;
3655   return generator;
3656 }
3657 
3658 INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
3659                       "Polly - Apply PPCG translation to SCOP", false, false)
3660 INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
3661 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
3662 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
3663 INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
3664 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
3665 INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
3666 INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
3667                     "Polly - Apply PPCG translation to SCOP", false, false)
3668