1 //===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Take a scop created by ScopInfo and map it to GPU code using the ppcg
10 // GPU mapping strategy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "polly/CodeGen/PPCGCodeGeneration.h"
15 #include "polly/CodeGen/CodeGeneration.h"
16 #include "polly/CodeGen/IslAst.h"
17 #include "polly/CodeGen/IslNodeBuilder.h"
18 #include "polly/CodeGen/PerfMonitor.h"
19 #include "polly/CodeGen/Utils.h"
20 #include "polly/DependenceInfo.h"
21 #include "polly/LinkAllPasses.h"
22 #include "polly/Options.h"
23 #include "polly/ScopDetection.h"
24 #include "polly/ScopInfo.h"
25 #include "polly/Support/SCEVValidator.h"
26 #include "llvm/ADT/PostOrderIterator.h"
27 #include "llvm/Analysis/TargetTransformInfo.h"
28 #include "llvm/IR/IntrinsicsNVPTX.h"
29 #include "llvm/IR/LegacyPassManager.h"
30 #include "llvm/IR/Verifier.h"
31 #include "llvm/IRReader/IRReader.h"
32 #include "llvm/InitializePasses.h"
33 #include "llvm/Linker/Linker.h"
34 #include "llvm/Support/SourceMgr.h"
35 #include "llvm/Support/TargetRegistry.h"
36 #include "llvm/Target/TargetMachine.h"
37 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
38 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
39 #include "isl/union_map.h"
40 #include <algorithm>
41 
42 extern "C" {
43 #include "ppcg/cuda.h"
44 #include "ppcg/gpu.h"
45 #include "ppcg/ppcg.h"
46 }
47 
48 #include "llvm/Support/Debug.h"
49 
50 using namespace polly;
51 using namespace llvm;
52 
53 #define DEBUG_TYPE "polly-codegen-ppcg"
54 
55 static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
56                                   cl::desc("Dump the computed GPU Schedule"),
57                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
58                                   cl::cat(PollyCategory));
59 
60 static cl::opt<bool>
61     DumpCode("polly-acc-dump-code",
62              cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
63              cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
64 
65 static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
66                                   cl::desc("Dump the kernel LLVM-IR"),
67                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
68                                   cl::cat(PollyCategory));
69 
70 static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
71                                    cl::desc("Dump the kernel assembly code"),
72                                    cl::Hidden, cl::init(false), cl::ZeroOrMore,
73                                    cl::cat(PollyCategory));
74 
75 static cl::opt<bool> FastMath("polly-acc-fastmath",
76                               cl::desc("Allow unsafe math optimizations"),
77                               cl::Hidden, cl::init(false), cl::ZeroOrMore,
78                               cl::cat(PollyCategory));
79 static cl::opt<bool> SharedMemory("polly-acc-use-shared",
80                                   cl::desc("Use shared memory"), cl::Hidden,
81                                   cl::init(false), cl::ZeroOrMore,
82                                   cl::cat(PollyCategory));
83 static cl::opt<bool> PrivateMemory("polly-acc-use-private",
84                                    cl::desc("Use private memory"), cl::Hidden,
85                                    cl::init(false), cl::ZeroOrMore,
86                                    cl::cat(PollyCategory));
87 
88 bool polly::PollyManagedMemory;
89 static cl::opt<bool, true>
90     XManagedMemory("polly-acc-codegen-managed-memory",
91                    cl::desc("Generate Host kernel code assuming"
92                             " that all memory has been"
93                             " declared as managed memory"),
94                    cl::location(PollyManagedMemory), cl::Hidden,
95                    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
96 
97 static cl::opt<bool>
98     FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
99                               cl::desc("Fail and generate a backtrace if"
100                                        " verifyModule fails on the GPU "
101                                        " kernel module."),
102                               cl::Hidden, cl::init(false), cl::ZeroOrMore,
103                               cl::cat(PollyCategory));
104 
105 static cl::opt<std::string> CUDALibDevice(
106     "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
107     cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
108     cl::ZeroOrMore, cl::cat(PollyCategory));
109 
110 static cl::opt<std::string>
111     CudaVersion("polly-acc-cuda-version",
112                 cl::desc("The CUDA version to compile for"), cl::Hidden,
113                 cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));
114 
115 static cl::opt<int>
116     MinCompute("polly-acc-mincompute",
117                cl::desc("Minimal number of compute statements to run on GPU."),
118                cl::Hidden, cl::init(10 * 512 * 512));
119 
120 GPURuntime polly::GPURuntimeChoice;
121 static cl::opt<GPURuntime, true> XGPURuntimeChoice(
122     "polly-gpu-runtime", cl::desc("The GPU Runtime API to target"),
123     cl::values(clEnumValN(GPURuntime::CUDA, "libcudart",
124                           "use the CUDA Runtime API"),
125                clEnumValN(GPURuntime::OpenCL, "libopencl",
126                           "use the OpenCL Runtime API")),
127     cl::location(polly::GPURuntimeChoice), cl::init(GPURuntime::CUDA),
128     cl::ZeroOrMore, cl::cat(PollyCategory));
129 
130 GPUArch polly::GPUArchChoice;
131 static cl::opt<GPUArch, true>
132     XGPUArchChoice("polly-gpu-arch", cl::desc("The GPU Architecture to target"),
133                    cl::values(clEnumValN(GPUArch::NVPTX64, "nvptx64",
134                                          "target NVIDIA 64-bit architecture"),
135                               clEnumValN(GPUArch::SPIR32, "spir32",
136                                          "target SPIR 32-bit architecture"),
137                               clEnumValN(GPUArch::SPIR64, "spir64",
138                                          "target SPIR 64-bit architecture")),
139                    cl::location(polly::GPUArchChoice),
140                    cl::init(GPUArch::NVPTX64), cl::ZeroOrMore,
141                    cl::cat(PollyCategory));
142 
143 extern bool polly::PerfMonitoring;
144 
145 /// Return  a unique name for a Scop, which is the scop region with the
146 /// function name.
147 std::string getUniqueScopName(const Scop *S) {
148   return "Scop Region: " + S->getNameStr() +
149          " | Function: " + std::string(S->getFunction().getName());
150 }
151 
152 /// Used to store information PPCG wants for kills. This information is
153 /// used by live range reordering.
154 ///
155 /// @see computeLiveRangeReordering
156 /// @see GPUNodeBuilder::createPPCGScop
157 /// @see GPUNodeBuilder::createPPCGProg
158 struct MustKillsInfo {
159   /// Collection of all kill statements that will be sequenced at the end of
160   /// PPCGScop->schedule.
161   ///
162   /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`
163   /// which merges schedules in *arbitrary* order.
164   /// (we don't care about the order of the kills anyway).
165   isl::schedule KillsSchedule;
166   /// Map from kill statement instances to scalars that need to be
167   /// killed.
168   ///
169   /// We currently derive kill information for:
170   ///  1. phi nodes. PHI nodes are not alive outside the scop and can
171   ///     consequently all be killed.
172   ///  2. Scalar arrays that are not used outside the Scop. This is
173   ///     checked by `isScalarUsesContainedInScop`.
174   /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
175   isl::union_map TaggedMustKills;
176 
177   /// Tagged must kills stripped of the tags.
178   /// [params] -> { Stmt_phantom[]  -> scalar_to_kill[] }
179   isl::union_map MustKills;
180 
181   MustKillsInfo() : KillsSchedule(nullptr) {}
182 };
183 
184 /// Check if SAI's uses are entirely contained within Scop S.
185 /// If a scalar is used only with a Scop, we are free to kill it, as no data
186 /// can flow in/out of the value any more.
187 /// @see computeMustKillsInfo
188 static bool isScalarUsesContainedInScop(const Scop &S,
189                                         const ScopArrayInfo *SAI) {
190   assert(SAI->isValueKind() && "this function only deals with scalars."
191                                " Dealing with arrays required alias analysis");
192 
193   const Region &R = S.getRegion();
194   for (User *U : SAI->getBasePtr()->users()) {
195     Instruction *I = dyn_cast<Instruction>(U);
196     assert(I && "invalid user of scop array info");
197     if (!R.contains(I))
198       return false;
199   }
200   return true;
201 }
202 
203 /// Compute must-kills needed to enable live range reordering with PPCG.
204 ///
205 /// @params S The Scop to compute live range reordering information
206 /// @returns live range reordering information that can be used to setup
207 /// PPCG.
208 static MustKillsInfo computeMustKillsInfo(const Scop &S) {
209   const isl::space ParamSpace = S.getParamSpace();
210   MustKillsInfo Info;
211 
212   // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
213   //      1.1 phi nodes in scop.
214   //      1.2 scalars that are only used within the scop
215   SmallVector<isl::id, 4> KillMemIds;
216   for (ScopArrayInfo *SAI : S.arrays()) {
217     if (SAI->isPHIKind() ||
218         (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
219       KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
220   }
221 
222   Info.TaggedMustKills = isl::union_map::empty(ParamSpace);
223   Info.MustKills = isl::union_map::empty(ParamSpace);
224 
225   // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
226   // schedule:
227   //     - filter: "[control] -> { }"
228   // So, we choose to not create this to keep the output a little nicer,
229   // at the cost of some code complexity.
230   Info.KillsSchedule = nullptr;
231 
232   for (isl::id &ToKillId : KillMemIds) {
233     isl::id KillStmtId = isl::id::alloc(
234         S.getIslCtx(),
235         std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);
236 
237     // NOTE: construction of tagged_must_kill:
238     // 2. We need to construct a map:
239     //     [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
240     // To construct this, we use `isl_map_domain_product` on 2 maps`:
241     // 2a. StmtToScalar:
242     //         [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
243     // 2b. PhantomRefToScalar:
244     //         [param] -> { ref_phantom[] -> scalar_to_kill[] }
245     //
246     // Combining these with `isl_map_domain_product` gives us
247     // TaggedMustKill:
248     //     [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
249 
250     // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
251     isl::map StmtToScalar = isl::map::universe(ParamSpace);
252     StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
253     StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));
254 
255     isl::id PhantomRefId = isl::id::alloc(
256         S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
257         nullptr);
258 
259     // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
260     isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
261     PhantomRefToScalar =
262         PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
263     PhantomRefToScalar =
264         PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);
265 
266     // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
267     isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
268     Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);
269 
270     // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
271     Info.MustKills = Info.TaggedMustKills.domain_factor_domain();
272 
273     // 3. Create the kill schedule of the form:
274     //     "[param] -> { Stmt_phantom[] }"
275     // Then add this to Info.KillsSchedule.
276     isl::space KillStmtSpace = ParamSpace;
277     KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
278     isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);
279 
280     isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
281     if (Info.KillsSchedule)
282       Info.KillsSchedule = isl::manage(
283           isl_schedule_set(Info.KillsSchedule.release(), KillSchedule.copy()));
284     else
285       Info.KillsSchedule = KillSchedule;
286   }
287 
288   return Info;
289 }
290 
291 /// Create the ast expressions for a ScopStmt.
292 ///
293 /// This function is a callback for to generate the ast expressions for each
294 /// of the scheduled ScopStmts.
295 static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
296     void *StmtT, __isl_take isl_ast_build *Build_C,
297     isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
298                                        isl_id *Id, void *User),
299     void *UserIndex,
300     isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
301     void *UserExpr) {
302 
303   ScopStmt *Stmt = (ScopStmt *)StmtT;
304 
305   if (!Stmt || !Build_C)
306     return NULL;
307 
308   isl::ast_build Build = isl::manage_copy(Build_C);
309   isl::ctx Ctx = Build.get_ctx();
310   isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);
311 
312   Stmt->setAstBuild(Build);
313 
314   for (MemoryAccess *Acc : *Stmt) {
315     isl::map AddrFunc = Acc->getAddressFunction();
316     AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());
317 
318     isl::id RefId = Acc->getId();
319     isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);
320 
321     isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
322     MPA = MPA.coalesce();
323     MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));
324 
325     isl::ast_expr Access = Build.access_from(MPA);
326     Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
327     RefToExpr = RefToExpr.set(RefId, Access);
328   }
329 
330   return RefToExpr.release();
331 }
332 
333 /// Given a LLVM Type, compute its size in bytes,
334 static int computeSizeInBytes(const Type *T) {
335   int bytes = T->getPrimitiveSizeInBits() / 8;
336   if (bytes == 0)
337     bytes = T->getScalarSizeInBits() / 8;
338   return bytes;
339 }
340 
341 /// Generate code for a GPU specific isl AST.
342 ///
343 /// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
344 /// generates code for general-purpose AST nodes, with special functionality
345 /// for generating GPU specific user nodes.
346 ///
347 /// @see GPUNodeBuilder::createUser
348 class GPUNodeBuilder : public IslNodeBuilder {
349 public:
350   GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
351                  const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
352                  DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
353                  gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
354       : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
355         Prog(Prog), Runtime(Runtime), Arch(Arch) {
356     getExprBuilder().setIDToSAI(&IDToSAI);
357   }
358 
359   /// Create after-run-time-check initialization code.
360   void initializeAfterRTH();
361 
362   /// Finalize the generated scop.
363   void finalize() override;
364 
365   /// Track if the full build process was successful.
366   ///
367   /// This value is set to false, if throughout the build process an error
368   /// occurred which prevents us from generating valid GPU code.
369   bool BuildSuccessful = true;
370 
371   /// The maximal number of loops surrounding a sequential kernel.
372   unsigned DeepestSequential = 0;
373 
374   /// The maximal number of loops surrounding a parallel kernel.
375   unsigned DeepestParallel = 0;
376 
377   /// Return the name to set for the ptx_kernel.
378   std::string getKernelFuncName(int Kernel_id);
379 
380 private:
381   /// A vector of array base pointers for which a new ScopArrayInfo was created.
382   ///
383   /// This vector is used to delete the ScopArrayInfo when it is not needed any
384   /// more.
385   std::vector<Value *> LocalArrays;
386 
387   /// A map from ScopArrays to their corresponding device allocations.
388   std::map<ScopArrayInfo *, Value *> DeviceAllocations;
389 
390   /// The current GPU context.
391   Value *GPUContext;
392 
393   /// The set of isl_ids allocated in the kernel
394   std::vector<isl_id *> KernelIds;
395 
396   /// A module containing GPU code.
397   ///
398   /// This pointer is only set in case we are currently generating GPU code.
399   std::unique_ptr<Module> GPUModule;
400 
401   /// The GPU program we generate code for.
402   gpu_prog *Prog;
403 
404   /// The GPU Runtime implementation to use (OpenCL or CUDA).
405   GPURuntime Runtime;
406 
407   /// The GPU Architecture to target.
408   GPUArch Arch;
409 
410   /// Class to free isl_ids.
411   class IslIdDeleter {
412   public:
413     void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
414   };
415 
416   /// A set containing all isl_ids allocated in a GPU kernel.
417   ///
418   /// By releasing this set all isl_ids will be freed.
419   std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;
420 
421   IslExprBuilder::IDToScopArrayInfoTy IDToSAI;
422 
423   /// Create code for user-defined AST nodes.
424   ///
425   /// These AST nodes can be of type:
426   ///
427   ///   - ScopStmt:      A computational statement (TODO)
428   ///   - Kernel:        A GPU kernel call (TODO)
429   ///   - Data-Transfer: A GPU <-> CPU data-transfer
430   ///   - In-kernel synchronization
431   ///   - In-kernel memory copy statement
432   ///
433   /// @param UserStmt The ast node to generate code for.
434   void createUser(__isl_take isl_ast_node *UserStmt) override;
435 
436   void createFor(__isl_take isl_ast_node *Node) override;
437 
438   enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };
439 
440   /// Create code for a data transfer statement
441   ///
442   /// @param TransferStmt The data transfer statement.
443   /// @param Direction The direction in which to transfer data.
444   void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
445                           enum DataDirection Direction);
446 
447   /// Find llvm::Values referenced in GPU kernel.
448   ///
449   /// @param Kernel The kernel to scan for llvm::Values
450   ///
451   /// @returns A tuple, whose:
452   ///          - First element contains the set of values referenced by the
453   ///            kernel
454   ///          - Second element contains the set of functions referenced by the
455   ///             kernel. All functions in the set satisfy
456   ///             `isValidFunctionInKernel`.
457   ///          - Third element contains loops that have induction variables
458   ///            which are used in the kernel, *and* these loops are *neither*
459   ///            in the scop, nor do they immediately surroung the Scop.
460   ///            See [Code generation of induction variables of loops outside
461   ///            Scops]
462   std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
463              isl::space>
464   getReferencesInKernel(ppcg_kernel *Kernel);
465 
466   /// Compute the sizes of the execution grid for a given kernel.
467   ///
468   /// @param Kernel The kernel to compute grid sizes for.
469   ///
470   /// @returns A tuple with grid sizes for X and Y dimension
471   std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);
472 
473   /// Get the managed array pointer for sending host pointers to the device.
474   /// \note
475   /// This is to be used only with managed memory
476   Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);
477 
478   /// Compute the sizes of the thread blocks for a given kernel.
479   ///
480   /// @param Kernel The kernel to compute thread block sizes for.
481   ///
482   /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
483   std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);
484 
485   /// Store a specific kernel launch parameter in the array of kernel launch
486   /// parameters.
487   ///
488   /// @param Parameters The list of parameters in which to store.
489   /// @param Param      The kernel launch parameter to store.
490   /// @param Index      The index in the parameter list, at which to store the
491   ///                   parameter.
492   void insertStoreParameter(Instruction *Parameters, Instruction *Param,
493                             int Index);
494 
495   /// Create kernel launch parameters.
496   ///
497   /// @param Kernel        The kernel to create parameters for.
498   /// @param F             The kernel function that has been created.
499   /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
500   ///
501   /// @returns A stack allocated array with pointers to the parameter
502   ///          values that are passed to the kernel.
503   Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
504                                 SetVector<Value *> SubtreeValues);
505 
506   /// Create declarations for kernel variable.
507   ///
508   /// This includes shared memory declarations.
509   ///
510   /// @param Kernel        The kernel definition to create variables for.
511   /// @param FN            The function into which to generate the variables.
512   void createKernelVariables(ppcg_kernel *Kernel, Function *FN);
513 
514   /// Add CUDA annotations to module.
515   ///
516   /// Add a set of CUDA annotations that declares the maximal block dimensions
517   /// that will be used to execute the CUDA kernel. This allows the NVIDIA
518   /// PTX compiler to bound the number of allocated registers to ensure the
519   /// resulting kernel is known to run with up to as many block dimensions
520   /// as specified here.
521   ///
522   /// @param M         The module to add the annotations to.
523   /// @param BlockDimX The size of block dimension X.
524   /// @param BlockDimY The size of block dimension Y.
525   /// @param BlockDimZ The size of block dimension Z.
526   void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
527                           Value *BlockDimZ);
528 
529   /// Create GPU kernel.
530   ///
531   /// Code generate the kernel described by @p KernelStmt.
532   ///
533   /// @param KernelStmt The ast node to generate kernel code for.
534   void createKernel(__isl_take isl_ast_node *KernelStmt);
535 
536   /// Generate code that computes the size of an array.
537   ///
538   /// @param Array The array for which to compute a size.
539   Value *getArraySize(gpu_array_info *Array);
540 
541   /// Generate code to compute the minimal offset at which an array is accessed.
542   ///
543   /// The offset of an array is the minimal array location accessed in a scop.
544   ///
545   /// Example:
546   ///
547   ///   for (long i = 0; i < 100; i++)
548   ///     A[i + 42] += ...
549   ///
550   ///   getArrayOffset(A) results in 42.
551   ///
552   /// @param Array The array for which to compute the offset.
553   /// @returns An llvm::Value that contains the offset of the array.
554   Value *getArrayOffset(gpu_array_info *Array);
555 
556   /// Prepare the kernel arguments for kernel code generation
557   ///
558   /// @param Kernel The kernel to generate code for.
559   /// @param FN     The function created for the kernel.
560   void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);
561 
562   /// Create kernel function.
563   ///
564   /// Create a kernel function located in a newly created module that can serve
565   /// as target for device code generation. Set the Builder to point to the
566   /// start block of this newly created function.
567   ///
568   /// @param Kernel The kernel to generate code for.
569   /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
570   /// @param SubtreeFunctions The set of llvm::Functions referenced by this
571   ///                         kernel.
572   void createKernelFunction(ppcg_kernel *Kernel,
573                             SetVector<Value *> &SubtreeValues,
574                             SetVector<Function *> &SubtreeFunctions);
575 
576   /// Create the declaration of a kernel function.
577   ///
578   /// The kernel function takes as arguments:
579   ///
580   ///   - One i8 pointer for each external array reference used in the kernel.
581   ///   - Host iterators
582   ///   - Parameters
583   ///   - Other LLVM Value references (TODO)
584   ///
585   /// @param Kernel The kernel to generate the function declaration for.
586   /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
587   ///
588   /// @returns The newly declared function.
589   Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
590                                      SetVector<Value *> &SubtreeValues);
591 
592   /// Insert intrinsic functions to obtain thread and block ids.
593   ///
594   /// @param The kernel to generate the intrinsic functions for.
595   void insertKernelIntrinsics(ppcg_kernel *Kernel);
596 
597   /// Insert function calls to retrieve the SPIR group/local ids.
598   ///
599   /// @param Kernel The kernel to generate the function calls for.
600   /// @param SizeTypeIs64Bit Whether size_t of the openCl device is 64bit.
601   void insertKernelCallsSPIR(ppcg_kernel *Kernel, bool SizeTypeIs64bit);
602 
603   /// Setup the creation of functions referenced by the GPU kernel.
604   ///
605   /// 1. Create new function declarations in GPUModule which are the same as
606   /// SubtreeFunctions.
607   ///
608   /// 2. Populate IslNodeBuilder::ValueMap with mappings from
609   /// old functions (that come from the original module) to new functions
610   /// (that are created within GPUModule). That way, we generate references
611   /// to the correct function (in GPUModule) in BlockGenerator.
612   ///
613   /// @see IslNodeBuilder::ValueMap
614   /// @see BlockGenerator::GlobalMap
615   /// @see BlockGenerator::getNewValue
616   /// @see GPUNodeBuilder::getReferencesInKernel.
617   ///
618   /// @param SubtreeFunctions The set of llvm::Functions referenced by
619   ///                         this kernel.
620   void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);
621 
622   /// Create a global-to-shared or shared-to-global copy statement.
623   ///
624   /// @param CopyStmt The copy statement to generate code for
625   void createKernelCopy(ppcg_kernel_stmt *CopyStmt);
626 
627   /// Create code for a ScopStmt called in @p Expr.
628   ///
629   /// @param Expr The expression containing the call.
630   /// @param KernelStmt The kernel statement referenced in the call.
631   void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);
632 
633   /// Create an in-kernel synchronization call.
634   void createKernelSync();
635 
636   /// Create a PTX assembly string for the current GPU kernel.
637   ///
638   /// @returns A string containing the corresponding PTX assembly code.
639   std::string createKernelASM();
640 
641   /// Remove references from the dominator tree to the kernel function @p F.
642   ///
643   /// @param F The function to remove references to.
644   void clearDominators(Function *F);
645 
646   /// Remove references from scalar evolution to the kernel function @p F.
647   ///
648   /// @param F The function to remove references to.
649   void clearScalarEvolution(Function *F);
650 
651   /// Remove references from loop info to the kernel function @p F.
652   ///
653   /// @param F The function to remove references to.
654   void clearLoops(Function *F);
655 
656   /// Check if the scop requires to be linked with CUDA's libdevice.
657   bool requiresCUDALibDevice();
658 
659   /// Link with the NVIDIA libdevice library (if needed and available).
660   void addCUDALibDevice();
661 
662   /// Finalize the generation of the kernel function.
663   ///
664   /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
665   /// dump its IR to stderr.
666   ///
667   /// @returns The Assembly string of the kernel.
668   std::string finalizeKernelFunction();
669 
670   /// Finalize the generation of the kernel arguments.
671   ///
672   /// This function ensures that not-read-only scalars used in a kernel are
673   /// stored back to the global memory location they are backed with before
674   /// the kernel terminates.
675   ///
676   /// @params Kernel The kernel to finalize kernel arguments for.
677   void finalizeKernelArguments(ppcg_kernel *Kernel);
678 
679   /// Create code that allocates memory to store arrays on device.
680   void allocateDeviceArrays();
681 
682   /// Create code to prepare the managed device pointers.
683   void prepareManagedDeviceArrays();
684 
685   /// Free all allocated device arrays.
686   void freeDeviceArrays();
687 
688   /// Create a call to initialize the GPU context.
689   ///
690   /// @returns A pointer to the newly initialized context.
691   Value *createCallInitContext();
692 
693   /// Create a call to get the device pointer for a kernel allocation.
694   ///
695   /// @param Allocation The Polly GPU allocation
696   ///
697   /// @returns The device parameter corresponding to this allocation.
698   Value *createCallGetDevicePtr(Value *Allocation);
699 
700   /// Create a call to free the GPU context.
701   ///
702   /// @param Context A pointer to an initialized GPU context.
703   void createCallFreeContext(Value *Context);
704 
705   /// Create a call to allocate memory on the device.
706   ///
707   /// @param Size The size of memory to allocate
708   ///
709   /// @returns A pointer that identifies this allocation.
710   Value *createCallAllocateMemoryForDevice(Value *Size);
711 
712   /// Create a call to free a device array.
713   ///
714   /// @param Array The device array to free.
715   void createCallFreeDeviceMemory(Value *Array);
716 
717   /// Create a call to copy data from host to device.
718   ///
719   /// @param HostPtr A pointer to the host data that should be copied.
720   /// @param DevicePtr A device pointer specifying the location to copy to.
721   void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
722                                       Value *Size);
723 
724   /// Create a call to copy data from device to host.
725   ///
726   /// @param DevicePtr A pointer to the device data that should be copied.
727   /// @param HostPtr A host pointer specifying the location to copy to.
728   void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
729                                       Value *Size);
730 
731   /// Create a call to synchronize Host & Device.
732   /// \note
733   /// This is to be used only with managed memory.
734   void createCallSynchronizeDevice();
735 
736   /// Create a call to get a kernel from an assembly string.
737   ///
738   /// @param Buffer The string describing the kernel.
739   /// @param Entry  The name of the kernel function to call.
740   ///
741   /// @returns A pointer to a kernel object
742   Value *createCallGetKernel(Value *Buffer, Value *Entry);
743 
744   /// Create a call to free a GPU kernel.
745   ///
746   /// @param GPUKernel THe kernel to free.
747   void createCallFreeKernel(Value *GPUKernel);
748 
749   /// Create a call to launch a GPU kernel.
750   ///
751   /// @param GPUKernel  The kernel to launch.
752   /// @param GridDimX   The size of the first grid dimension.
753   /// @param GridDimY   The size of the second grid dimension.
754   /// @param GridBlockX The size of the first block dimension.
755   /// @param GridBlockY The size of the second block dimension.
756   /// @param GridBlockZ The size of the third block dimension.
757   /// @param Parameters A pointer to an array that contains itself pointers to
758   ///                   the parameter values passed for each kernel argument.
759   void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
760                               Value *GridDimY, Value *BlockDimX,
761                               Value *BlockDimY, Value *BlockDimZ,
762                               Value *Parameters);
763 };
764 
765 std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
766   return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
767          std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
768 }
769 
770 void GPUNodeBuilder::initializeAfterRTH() {
771   BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
772                                  &*Builder.GetInsertPoint(), &DT, &LI);
773   NewBB->setName("polly.acc.initialize");
774   Builder.SetInsertPoint(&NewBB->front());
775 
776   GPUContext = createCallInitContext();
777 
778   if (!PollyManagedMemory)
779     allocateDeviceArrays();
780   else
781     prepareManagedDeviceArrays();
782 }
783 
784 void GPUNodeBuilder::finalize() {
785   if (!PollyManagedMemory)
786     freeDeviceArrays();
787 
788   createCallFreeContext(GPUContext);
789   IslNodeBuilder::finalize();
790 }
791 
792 void GPUNodeBuilder::allocateDeviceArrays() {
793   assert(!PollyManagedMemory &&
794          "Managed memory will directly send host pointers "
795          "to the kernel. There is no need for device arrays");
796   isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());
797 
798   for (int i = 0; i < Prog->n_array; ++i) {
799     gpu_array_info *Array = &Prog->array[i];
800     auto *ScopArray = (ScopArrayInfo *)Array->user;
801     std::string DevArrayName("p_dev_array_");
802     DevArrayName.append(Array->name);
803 
804     Value *ArraySize = getArraySize(Array);
805     Value *Offset = getArrayOffset(Array);
806     if (Offset)
807       ArraySize = Builder.CreateSub(
808           ArraySize,
809           Builder.CreateMul(Offset,
810                             Builder.getInt64(ScopArray->getElemSizeInBytes())));
811     const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
812     // It makes no sense to have an array of size 0. The CUDA API will
813     // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
814     // choose to be defensive and catch this at the compile phase. It is
815     // most likely that we are doing something wrong with size computation.
816     if (SizeSCEV->isZero()) {
817       errs() << getUniqueScopName(&S)
818              << " has computed array size 0: " << *ArraySize
819              << " | for array: " << *(ScopArray->getBasePtr())
820              << ". This is illegal, exiting.\n";
821       report_fatal_error("array size was computed to be 0");
822     }
823 
824     Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
825     DevArray->setName(DevArrayName);
826     DeviceAllocations[ScopArray] = DevArray;
827   }
828 
829   isl_ast_build_free(Build);
830 }
831 
832 void GPUNodeBuilder::prepareManagedDeviceArrays() {
833   assert(PollyManagedMemory &&
834          "Device array most only be prepared in managed-memory mode");
835   for (int i = 0; i < Prog->n_array; ++i) {
836     gpu_array_info *Array = &Prog->array[i];
837     ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
838     Value *HostPtr;
839 
840     if (gpu_array_is_scalar(Array))
841       HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
842     else
843       HostPtr = ScopArray->getBasePtr();
844     HostPtr = getLatestValue(HostPtr);
845 
846     Value *Offset = getArrayOffset(Array);
847     if (Offset) {
848       HostPtr = Builder.CreatePointerCast(
849           HostPtr, ScopArray->getElementType()->getPointerTo());
850       HostPtr = Builder.CreateGEP(HostPtr, Offset);
851     }
852 
853     HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
854     DeviceAllocations[ScopArray] = HostPtr;
855   }
856 }
857 
858 void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
859                                         Value *BlockDimY, Value *BlockDimZ) {
860   auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");
861 
862   for (auto &F : *M) {
863     if (F.getCallingConv() != CallingConv::PTX_Kernel)
864       continue;
865 
866     Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};
867 
868     Metadata *Elements[] = {
869         ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
870         ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
871         ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
872         ValueAsMetadata::get(V[2]),
873     };
874     MDNode *Node = MDNode::get(M->getContext(), Elements);
875     AnnotationNode->addOperand(Node);
876   }
877 }
878 
879 void GPUNodeBuilder::freeDeviceArrays() {
880   assert(!PollyManagedMemory && "Managed memory does not use device arrays");
881   for (auto &Array : DeviceAllocations)
882     createCallFreeDeviceMemory(Array.second);
883 }
884 
885 Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
886   const char *Name = "polly_getKernel";
887   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
888   Function *F = M->getFunction(Name);
889 
890   // If F is not available, declare it.
891   if (!F) {
892     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
893     std::vector<Type *> Args;
894     Args.push_back(Builder.getInt8PtrTy());
895     Args.push_back(Builder.getInt8PtrTy());
896     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
897     F = Function::Create(Ty, Linkage, Name, M);
898   }
899 
900   return Builder.CreateCall(F, {Buffer, Entry});
901 }
902 
903 Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
904   const char *Name = "polly_getDevicePtr";
905   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
906   Function *F = M->getFunction(Name);
907 
908   // If F is not available, declare it.
909   if (!F) {
910     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
911     std::vector<Type *> Args;
912     Args.push_back(Builder.getInt8PtrTy());
913     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
914     F = Function::Create(Ty, Linkage, Name, M);
915   }
916 
917   return Builder.CreateCall(F, {Allocation});
918 }
919 
920 void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
921                                             Value *GridDimY, Value *BlockDimX,
922                                             Value *BlockDimY, Value *BlockDimZ,
923                                             Value *Parameters) {
924   const char *Name = "polly_launchKernel";
925   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
926   Function *F = M->getFunction(Name);
927 
928   // If F is not available, declare it.
929   if (!F) {
930     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
931     std::vector<Type *> Args;
932     Args.push_back(Builder.getInt8PtrTy());
933     Args.push_back(Builder.getInt32Ty());
934     Args.push_back(Builder.getInt32Ty());
935     Args.push_back(Builder.getInt32Ty());
936     Args.push_back(Builder.getInt32Ty());
937     Args.push_back(Builder.getInt32Ty());
938     Args.push_back(Builder.getInt8PtrTy());
939     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
940     F = Function::Create(Ty, Linkage, Name, M);
941   }
942 
943   Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
944                          BlockDimZ, Parameters});
945 }
946 
947 void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
948   const char *Name = "polly_freeKernel";
949   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
950   Function *F = M->getFunction(Name);
951 
952   // If F is not available, declare it.
953   if (!F) {
954     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
955     std::vector<Type *> Args;
956     Args.push_back(Builder.getInt8PtrTy());
957     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
958     F = Function::Create(Ty, Linkage, Name, M);
959   }
960 
961   Builder.CreateCall(F, {GPUKernel});
962 }
963 
964 void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
965   assert(!PollyManagedMemory &&
966          "Managed memory does not allocate or free memory "
967          "for device");
968   const char *Name = "polly_freeDeviceMemory";
969   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
970   Function *F = M->getFunction(Name);
971 
972   // If F is not available, declare it.
973   if (!F) {
974     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
975     std::vector<Type *> Args;
976     Args.push_back(Builder.getInt8PtrTy());
977     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
978     F = Function::Create(Ty, Linkage, Name, M);
979   }
980 
981   Builder.CreateCall(F, {Array});
982 }
983 
984 Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
985   assert(!PollyManagedMemory &&
986          "Managed memory does not allocate or free memory "
987          "for device");
988   const char *Name = "polly_allocateMemoryForDevice";
989   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
990   Function *F = M->getFunction(Name);
991 
992   // If F is not available, declare it.
993   if (!F) {
994     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
995     std::vector<Type *> Args;
996     Args.push_back(Builder.getInt64Ty());
997     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
998     F = Function::Create(Ty, Linkage, Name, M);
999   }
1000 
1001   return Builder.CreateCall(F, {Size});
1002 }
1003 
1004 void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
1005                                                     Value *DeviceData,
1006                                                     Value *Size) {
1007   assert(!PollyManagedMemory &&
1008          "Managed memory does not transfer memory between "
1009          "device and host");
1010   const char *Name = "polly_copyFromHostToDevice";
1011   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1012   Function *F = M->getFunction(Name);
1013 
1014   // If F is not available, declare it.
1015   if (!F) {
1016     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1017     std::vector<Type *> Args;
1018     Args.push_back(Builder.getInt8PtrTy());
1019     Args.push_back(Builder.getInt8PtrTy());
1020     Args.push_back(Builder.getInt64Ty());
1021     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1022     F = Function::Create(Ty, Linkage, Name, M);
1023   }
1024 
1025   Builder.CreateCall(F, {HostData, DeviceData, Size});
1026 }
1027 
1028 void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
1029                                                     Value *HostData,
1030                                                     Value *Size) {
1031   assert(!PollyManagedMemory &&
1032          "Managed memory does not transfer memory between "
1033          "device and host");
1034   const char *Name = "polly_copyFromDeviceToHost";
1035   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1036   Function *F = M->getFunction(Name);
1037 
1038   // If F is not available, declare it.
1039   if (!F) {
1040     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1041     std::vector<Type *> Args;
1042     Args.push_back(Builder.getInt8PtrTy());
1043     Args.push_back(Builder.getInt8PtrTy());
1044     Args.push_back(Builder.getInt64Ty());
1045     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1046     F = Function::Create(Ty, Linkage, Name, M);
1047   }
1048 
1049   Builder.CreateCall(F, {DeviceData, HostData, Size});
1050 }
1051 
1052 void GPUNodeBuilder::createCallSynchronizeDevice() {
1053   assert(PollyManagedMemory && "explicit synchronization is only necessary for "
1054                                "managed memory");
1055   const char *Name = "polly_synchronizeDevice";
1056   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1057   Function *F = M->getFunction(Name);
1058 
1059   // If F is not available, declare it.
1060   if (!F) {
1061     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1062     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
1063     F = Function::Create(Ty, Linkage, Name, M);
1064   }
1065 
1066   Builder.CreateCall(F);
1067 }
1068 
1069 Value *GPUNodeBuilder::createCallInitContext() {
1070   const char *Name;
1071 
1072   switch (Runtime) {
1073   case GPURuntime::CUDA:
1074     Name = "polly_initContextCUDA";
1075     break;
1076   case GPURuntime::OpenCL:
1077     Name = "polly_initContextCL";
1078     break;
1079   }
1080 
1081   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1082   Function *F = M->getFunction(Name);
1083 
1084   // If F is not available, declare it.
1085   if (!F) {
1086     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1087     std::vector<Type *> Args;
1088     FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
1089     F = Function::Create(Ty, Linkage, Name, M);
1090   }
1091 
1092   return Builder.CreateCall(F, {});
1093 }
1094 
1095 void GPUNodeBuilder::createCallFreeContext(Value *Context) {
1096   const char *Name = "polly_freeContext";
1097   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1098   Function *F = M->getFunction(Name);
1099 
1100   // If F is not available, declare it.
1101   if (!F) {
1102     GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1103     std::vector<Type *> Args;
1104     Args.push_back(Builder.getInt8PtrTy());
1105     FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1106     F = Function::Create(Ty, Linkage, Name, M);
1107   }
1108 
1109   Builder.CreateCall(F, {Context});
1110 }
1111 
1112 /// Check if one string is a prefix of another.
1113 ///
1114 /// @param String The string in which to look for the prefix.
1115 /// @param Prefix The prefix to look for.
1116 static bool isPrefix(std::string String, std::string Prefix) {
1117   return String.find(Prefix) == 0;
1118 }
1119 
1120 Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
1121   isl::ast_build Build = isl::ast_build::from_context(S.getContext());
1122   Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);
1123 
1124   if (!gpu_array_is_scalar(Array)) {
1125     isl::multi_pw_aff ArrayBound = isl::manage_copy(Array->bound);
1126 
1127     isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
1128     isl::ast_expr Res = Build.expr_from(OffsetDimZero);
1129 
1130     for (unsigned int i = 1; i < Array->n_index; i++) {
1131       isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
1132       isl::ast_expr Expr = Build.expr_from(Bound_I);
1133       Res = Res.mul(Expr);
1134     }
1135 
1136     Value *NumElements = ExprBuilder.create(Res.release());
1137     if (NumElements->getType() != ArraySize->getType())
1138       NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
1139     ArraySize = Builder.CreateMul(ArraySize, NumElements);
1140   }
1141   return ArraySize;
1142 }
1143 
1144 Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
1145   if (gpu_array_is_scalar(Array))
1146     return nullptr;
1147 
1148   isl::ast_build Build = isl::ast_build::from_context(S.getContext());
1149 
1150   isl::set Min = isl::manage_copy(Array->extent).lexmin();
1151 
1152   isl::set ZeroSet = isl::set::universe(Min.get_space());
1153 
1154   for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++)
1155     ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0);
1156 
1157   if (Min.is_subset(ZeroSet)) {
1158     return nullptr;
1159   }
1160 
1161   isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.get_ctx(), 0));
1162 
1163   for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++) {
1164     if (i > 0) {
1165       isl::pw_aff Bound_I =
1166           isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1));
1167       isl::ast_expr BExpr = Build.expr_from(Bound_I);
1168       Result = Result.mul(BExpr);
1169     }
1170     isl::pw_aff DimMin = Min.dim_min(i);
1171     isl::ast_expr MExpr = Build.expr_from(DimMin);
1172     Result = Result.add(MExpr);
1173   }
1174 
1175   return ExprBuilder.create(Result.release());
1176 }
1177 
1178 Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
1179                                              ScopArrayInfo *ArrayInfo) {
1180   assert(PollyManagedMemory && "Only used when you wish to get a host "
1181                                "pointer for sending data to the kernel, "
1182                                "with managed memory");
1183   std::map<ScopArrayInfo *, Value *>::iterator it;
1184   it = DeviceAllocations.find(ArrayInfo);
1185   assert(it != DeviceAllocations.end() &&
1186          "Device array expected to be available");
1187   return it->second;
1188 }
1189 
1190 void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
1191                                         enum DataDirection Direction) {
1192   assert(!PollyManagedMemory && "Managed memory needs no data transfers");
1193   isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
1194   isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
1195   isl_id *Id = isl_ast_expr_get_id(Arg);
1196   auto Array = (gpu_array_info *)isl_id_get_user(Id);
1197   auto ScopArray = (ScopArrayInfo *)(Array->user);
1198 
1199   Value *Size = getArraySize(Array);
1200   Value *Offset = getArrayOffset(Array);
1201   Value *DevPtr = DeviceAllocations[ScopArray];
1202 
1203   Value *HostPtr;
1204 
1205   if (gpu_array_is_scalar(Array))
1206     HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
1207   else
1208     HostPtr = ScopArray->getBasePtr();
1209   HostPtr = getLatestValue(HostPtr);
1210 
1211   if (Offset) {
1212     HostPtr = Builder.CreatePointerCast(
1213         HostPtr, ScopArray->getElementType()->getPointerTo());
1214     HostPtr = Builder.CreateGEP(HostPtr, Offset);
1215   }
1216 
1217   HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
1218 
1219   if (Offset) {
1220     Size = Builder.CreateSub(
1221         Size, Builder.CreateMul(
1222                   Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
1223   }
1224 
1225   if (Direction == HOST_TO_DEVICE)
1226     createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
1227   else
1228     createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);
1229 
1230   isl_id_free(Id);
1231   isl_ast_expr_free(Arg);
1232   isl_ast_expr_free(Expr);
1233   isl_ast_node_free(TransferStmt);
1234 }
1235 
1236 void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
1237   isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
1238   isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
1239   isl_id *Id = isl_ast_expr_get_id(StmtExpr);
1240   isl_id_free(Id);
1241   isl_ast_expr_free(StmtExpr);
1242 
1243   const char *Str = isl_id_get_name(Id);
1244   if (!strcmp(Str, "kernel")) {
1245     createKernel(UserStmt);
1246     if (PollyManagedMemory)
1247       createCallSynchronizeDevice();
1248     isl_ast_expr_free(Expr);
1249     return;
1250   }
1251   if (!strcmp(Str, "init_device")) {
1252     initializeAfterRTH();
1253     isl_ast_node_free(UserStmt);
1254     isl_ast_expr_free(Expr);
1255     return;
1256   }
1257   if (!strcmp(Str, "clear_device")) {
1258     finalize();
1259     isl_ast_node_free(UserStmt);
1260     isl_ast_expr_free(Expr);
1261     return;
1262   }
1263   if (isPrefix(Str, "to_device")) {
1264     if (!PollyManagedMemory)
1265       createDataTransfer(UserStmt, HOST_TO_DEVICE);
1266     else
1267       isl_ast_node_free(UserStmt);
1268 
1269     isl_ast_expr_free(Expr);
1270     return;
1271   }
1272 
1273   if (isPrefix(Str, "from_device")) {
1274     if (!PollyManagedMemory) {
1275       createDataTransfer(UserStmt, DEVICE_TO_HOST);
1276     } else {
1277       isl_ast_node_free(UserStmt);
1278     }
1279     isl_ast_expr_free(Expr);
1280     return;
1281   }
1282 
1283   isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
1284   struct ppcg_kernel_stmt *KernelStmt =
1285       (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
1286   isl_id_free(Anno);
1287 
1288   switch (KernelStmt->type) {
1289   case ppcg_kernel_domain:
1290     createScopStmt(Expr, KernelStmt);
1291     isl_ast_node_free(UserStmt);
1292     return;
1293   case ppcg_kernel_copy:
1294     createKernelCopy(KernelStmt);
1295     isl_ast_expr_free(Expr);
1296     isl_ast_node_free(UserStmt);
1297     return;
1298   case ppcg_kernel_sync:
1299     createKernelSync();
1300     isl_ast_expr_free(Expr);
1301     isl_ast_node_free(UserStmt);
1302     return;
1303   }
1304 
1305   isl_ast_expr_free(Expr);
1306   isl_ast_node_free(UserStmt);
1307 }
1308 
1309 void GPUNodeBuilder::createFor(__isl_take isl_ast_node *Node) {
1310   createForSequential(isl::manage(Node), false);
1311 }
1312 
1313 void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
1314   isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
1315   LocalIndex = isl_ast_expr_address_of(LocalIndex);
1316   Value *LocalAddr = ExprBuilder.create(LocalIndex);
1317   isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
1318   Index = isl_ast_expr_address_of(Index);
1319   Value *GlobalAddr = ExprBuilder.create(Index);
1320   Type *IndexTy = cast<PointerType>(GlobalAddr->getType())->getElementType();
1321 
1322   if (KernelStmt->u.c.read) {
1323     LoadInst *Load = Builder.CreateLoad(IndexTy, GlobalAddr, "shared.read");
1324     Builder.CreateStore(Load, LocalAddr);
1325   } else {
1326     LoadInst *Load = Builder.CreateLoad(IndexTy, LocalAddr, "shared.write");
1327     Builder.CreateStore(Load, GlobalAddr);
1328   }
1329 }
1330 
1331 void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
1332                                     ppcg_kernel_stmt *KernelStmt) {
1333   auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
1334   isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;
1335 
1336   LoopToScevMapT LTS;
1337   LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());
1338 
1339   createSubstitutions(Expr, Stmt, LTS);
1340 
1341   if (Stmt->isBlockStmt())
1342     BlockGen.copyStmt(*Stmt, LTS, Indexes);
1343   else
1344     RegionGen.copyStmt(*Stmt, LTS, Indexes);
1345 }
1346 
1347 void GPUNodeBuilder::createKernelSync() {
1348   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1349   const char *SpirName = "__gen_ocl_barrier_global";
1350 
1351   Function *Sync;
1352 
1353   switch (Arch) {
1354   case GPUArch::SPIR64:
1355   case GPUArch::SPIR32:
1356     Sync = M->getFunction(SpirName);
1357 
1358     // If Sync is not available, declare it.
1359     if (!Sync) {
1360       GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
1361       std::vector<Type *> Args;
1362       FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
1363       Sync = Function::Create(Ty, Linkage, SpirName, M);
1364       Sync->setCallingConv(CallingConv::SPIR_FUNC);
1365     }
1366     break;
1367   case GPUArch::NVPTX64:
1368     Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
1369     break;
1370   }
1371 
1372   Builder.CreateCall(Sync, {});
1373 }
1374 
1375 /// Collect llvm::Values referenced from @p Node
1376 ///
1377 /// This function only applies to isl_ast_nodes that are user_nodes referring
1378 /// to a ScopStmt. All other node types are ignore.
1379 ///
1380 /// @param Node The node to collect references for.
1381 /// @param User A user pointer used as storage for the data that is collected.
1382 ///
1383 /// @returns isl_bool_true if data could be collected successfully.
1384 isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
1385   if (isl_ast_node_get_type(Node) != isl_ast_node_user)
1386     return isl_bool_true;
1387 
1388   isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
1389   isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
1390   isl_id *Id = isl_ast_expr_get_id(StmtExpr);
1391   const char *Str = isl_id_get_name(Id);
1392   isl_id_free(Id);
1393   isl_ast_expr_free(StmtExpr);
1394   isl_ast_expr_free(Expr);
1395 
1396   if (!isPrefix(Str, "Stmt"))
1397     return isl_bool_true;
1398 
1399   Id = isl_ast_node_get_annotation(Node);
1400   auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
1401   auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
1402   isl_id_free(Id);
1403 
1404   addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);
1405 
1406   return isl_bool_true;
1407 }
1408 
1409 /// A list of functions that are available in NVIDIA's libdevice.
1410 const std::set<std::string> CUDALibDeviceFunctions = {
1411     "exp",      "expf",      "expl",      "cos", "cosf", "sqrt", "sqrtf",
1412     "copysign", "copysignf", "copysignl", "log", "logf", "powi", "powif"};
1413 
1414 // A map from intrinsics to their corresponding libdevice functions.
1415 const std::map<std::string, std::string> IntrinsicToLibdeviceFunc = {
1416     {"llvm.exp.f64", "exp"},
1417     {"llvm.exp.f32", "expf"},
1418     {"llvm.powi.f64", "powi"},
1419     {"llvm.powi.f32", "powif"}};
1420 
1421 /// Return the corresponding CUDA libdevice function name @p Name.
1422 /// Note that this function will try to convert instrinsics in the list
1423 /// IntrinsicToLibdeviceFunc into libdevice functions.
1424 /// This is because some intrinsics such as `exp`
1425 /// are not supported by the NVPTX backend.
1426 /// If this restriction of the backend is lifted, we should refactor our code
1427 /// so that we use intrinsics whenever possible.
1428 ///
1429 /// Return "" if we are not compiling for CUDA.
1430 std::string getCUDALibDeviceFuntion(StringRef NameRef) {
1431   std::string Name = NameRef.str();
1432   auto It = IntrinsicToLibdeviceFunc.find(Name);
1433   if (It != IntrinsicToLibdeviceFunc.end())
1434     return getCUDALibDeviceFuntion(It->second);
1435 
1436   if (CUDALibDeviceFunctions.count(Name))
1437     return ("__nv_" + Name);
1438 
1439   return "";
1440 }
1441 
1442 /// Check if F is a function that we can code-generate in a GPU kernel.
1443 static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
1444   assert(F && "F is an invalid pointer");
1445   // We string compare against the name of the function to allow
1446   // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
1447   // "llvm.copysign".
1448   const StringRef Name = F->getName();
1449 
1450   if (AllowLibDevice && getCUDALibDeviceFuntion(Name).length() > 0)
1451     return true;
1452 
1453   return F->isIntrinsic() &&
1454          (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
1455           Name.startswith("llvm.copysign"));
1456 }
1457 
1458 /// Do not take `Function` as a subtree value.
1459 ///
1460 /// We try to take the reference of all subtree values and pass them along
1461 /// to the kernel from the host. Taking an address of any function and
1462 /// trying to pass along is nonsensical. Only allow `Value`s that are not
1463 /// `Function`s.
1464 static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }
1465 
1466 /// Return `Function`s from `RawSubtreeValues`.
1467 static SetVector<Function *>
1468 getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
1469                                  bool AllowCUDALibDevice) {
1470   SetVector<Function *> SubtreeFunctions;
1471   for (Value *It : RawSubtreeValues) {
1472     Function *F = dyn_cast<Function>(It);
1473     if (F) {
1474       assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
1475              "Code should have bailed out by "
1476              "this point if an invalid function "
1477              "were present in a kernel.");
1478       SubtreeFunctions.insert(F);
1479     }
1480   }
1481   return SubtreeFunctions;
1482 }
1483 
1484 std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
1485            isl::space>
1486 GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
1487   SetVector<Value *> SubtreeValues;
1488   SetVector<const SCEV *> SCEVs;
1489   SetVector<const Loop *> Loops;
1490   isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params();
1491   SubtreeReferences References = {
1492       LI,         SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(),
1493       &ParamSpace};
1494 
1495   for (const auto &I : IDToValue)
1496     SubtreeValues.insert(I.second);
1497 
1498   // NOTE: this is populated in IslNodeBuilder::addParameters
1499   // See [Code generation of induction variables of loops outside Scops].
1500   for (const auto &I : OutsideLoopIterations)
1501     SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());
1502 
1503   isl_ast_node_foreach_descendant_top_down(
1504       Kernel->tree, collectReferencesInGPUStmt, &References);
1505 
1506   for (const SCEV *Expr : SCEVs) {
1507     findValues(Expr, SE, SubtreeValues);
1508     findLoops(Expr, Loops);
1509   }
1510 
1511   Loops.remove_if([this](const Loop *L) {
1512     return S.contains(L) || L->contains(S.getEntry());
1513   });
1514 
1515   for (auto &SAI : S.arrays())
1516     SubtreeValues.remove(SAI->getBasePtr());
1517 
1518   isl_space *Space = S.getParamSpace().release();
1519   for (long i = 0, n = isl_space_dim(Space, isl_dim_param); i < n; i++) {
1520     isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
1521     assert(IDToValue.count(Id));
1522     Value *Val = IDToValue[Id];
1523     SubtreeValues.remove(Val);
1524     isl_id_free(Id);
1525   }
1526   isl_space_free(Space);
1527 
1528   for (long i = 0, n = isl_space_dim(Kernel->space, isl_dim_set); i < n; i++) {
1529     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
1530     assert(IDToValue.count(Id));
1531     Value *Val = IDToValue[Id];
1532     SubtreeValues.remove(Val);
1533     isl_id_free(Id);
1534   }
1535 
1536   // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
1537   // SubtreeValues. This is important, because we should not lose any
1538   // SubtreeValues in the process of constructing the
1539   // "ValidSubtree{Values, Functions} sets. Nor should the set
1540   // ValidSubtree{Values, Functions} have any common element.
1541   auto ValidSubtreeValuesIt =
1542       make_filter_range(SubtreeValues, isValidSubtreeValue);
1543   SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
1544                                         ValidSubtreeValuesIt.end());
1545 
1546   bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;
1547 
1548   SetVector<Function *> ValidSubtreeFunctions(
1549       getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));
1550 
1551   // @see IslNodeBuilder::getReferencesInSubtree
1552   SetVector<Value *> ReplacedValues;
1553   for (Value *V : ValidSubtreeValues) {
1554     auto It = ValueMap.find(V);
1555     if (It == ValueMap.end())
1556       ReplacedValues.insert(V);
1557     else
1558       ReplacedValues.insert(It->second);
1559   }
1560   return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops,
1561                          ParamSpace);
1562 }
1563 
1564 void GPUNodeBuilder::clearDominators(Function *F) {
1565   DomTreeNode *N = DT.getNode(&F->getEntryBlock());
1566   std::vector<BasicBlock *> Nodes;
1567   for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
1568     Nodes.push_back(I->getBlock());
1569 
1570   for (BasicBlock *BB : Nodes)
1571     DT.eraseNode(BB);
1572 }
1573 
1574 void GPUNodeBuilder::clearScalarEvolution(Function *F) {
1575   for (BasicBlock &BB : *F) {
1576     Loop *L = LI.getLoopFor(&BB);
1577     if (L)
1578       SE.forgetLoop(L);
1579   }
1580 }
1581 
1582 void GPUNodeBuilder::clearLoops(Function *F) {
1583   SmallSet<Loop *, 1> WorkList;
1584   for (BasicBlock &BB : *F) {
1585     Loop *L = LI.getLoopFor(&BB);
1586     if (L)
1587       WorkList.insert(L);
1588   }
1589   for (auto *L : WorkList)
1590     LI.erase(L);
1591 }
1592 
1593 std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
1594   std::vector<Value *> Sizes;
1595   isl::ast_build Context = isl::ast_build::from_context(S.getContext());
1596 
1597   isl::multi_pw_aff GridSizePwAffs = isl::manage_copy(Kernel->grid_size);
1598   for (long i = 0; i < Kernel->n_grid; i++) {
1599     isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i);
1600     isl::ast_expr GridSize = Context.expr_from(Size);
1601     Value *Res = ExprBuilder.create(GridSize.release());
1602     Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
1603     Sizes.push_back(Res);
1604   }
1605 
1606   for (long i = Kernel->n_grid; i < 3; i++)
1607     Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));
1608 
1609   return std::make_tuple(Sizes[0], Sizes[1]);
1610 }
1611 
1612 std::tuple<Value *, Value *, Value *>
1613 GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
1614   std::vector<Value *> Sizes;
1615 
1616   for (long i = 0; i < Kernel->n_block; i++) {
1617     Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
1618     Sizes.push_back(Res);
1619   }
1620 
1621   for (long i = Kernel->n_block; i < 3; i++)
1622     Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));
1623 
1624   return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
1625 }
1626 
1627 void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
1628                                           Instruction *Param, int Index) {
1629   Value *Slot = Builder.CreateGEP(
1630       Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
1631   Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
1632   Builder.CreateStore(ParamTyped, Slot);
1633 }
1634 
1635 Value *
1636 GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
1637                                        SetVector<Value *> SubtreeValues) {
1638   const int NumArgs = F->arg_size();
1639   std::vector<int> ArgSizes(NumArgs);
1640 
1641   // If we are using the OpenCL Runtime, we need to add the kernel argument
1642   // sizes to the end of the launch-parameter list, so OpenCL can determine
1643   // how big the respective kernel arguments are.
1644   // Here we need to reserve adequate space for that.
1645   Type *ArrayTy;
1646   if (Runtime == GPURuntime::OpenCL)
1647     ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);
1648   else
1649     ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs);
1650 
1651   BasicBlock *EntryBlock =
1652       &Builder.GetInsertBlock()->getParent()->getEntryBlock();
1653   auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
1654   std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
1655   Instruction *Parameters = new AllocaInst(
1656       ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());
1657 
1658   int Index = 0;
1659   for (long i = 0; i < Prog->n_array; i++) {
1660     if (!ppcg_kernel_requires_array_argument(Kernel, i))
1661       continue;
1662 
1663     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
1664     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
1665 
1666     if (Runtime == GPURuntime::OpenCL)
1667       ArgSizes[Index] = SAI->getElemSizeInBytes();
1668 
1669     Value *DevArray = nullptr;
1670     if (PollyManagedMemory) {
1671       DevArray = getManagedDeviceArray(&Prog->array[i],
1672                                        const_cast<ScopArrayInfo *>(SAI));
1673     } else {
1674       DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
1675       DevArray = createCallGetDevicePtr(DevArray);
1676     }
1677     assert(DevArray != nullptr && "Array to be offloaded to device not "
1678                                   "initialized");
1679     Value *Offset = getArrayOffset(&Prog->array[i]);
1680 
1681     if (Offset) {
1682       DevArray = Builder.CreatePointerCast(
1683           DevArray, SAI->getElementType()->getPointerTo());
1684       DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
1685       DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
1686     }
1687     Value *Slot = Builder.CreateGEP(
1688         Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
1689 
1690     if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
1691       Value *ValPtr = nullptr;
1692       if (PollyManagedMemory)
1693         ValPtr = DevArray;
1694       else
1695         ValPtr = BlockGen.getOrCreateAlloca(SAI);
1696 
1697       assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
1698                                   " to be stored into Parameters");
1699       Value *ValPtrCast =
1700           Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
1701       Builder.CreateStore(ValPtrCast, Slot);
1702     } else {
1703       Instruction *Param =
1704           new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
1705                          Launch + "_param_" + std::to_string(Index),
1706                          EntryBlock->getTerminator());
1707       Builder.CreateStore(DevArray, Param);
1708       Value *ParamTyped =
1709           Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
1710       Builder.CreateStore(ParamTyped, Slot);
1711     }
1712     Index++;
1713   }
1714 
1715   int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);
1716 
1717   for (long i = 0; i < NumHostIters; i++) {
1718     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
1719     Value *Val = IDToValue[Id];
1720     isl_id_free(Id);
1721 
1722     if (Runtime == GPURuntime::OpenCL)
1723       ArgSizes[Index] = computeSizeInBytes(Val->getType());
1724 
1725     Instruction *Param =
1726         new AllocaInst(Val->getType(), AddressSpace,
1727                        Launch + "_param_" + std::to_string(Index),
1728                        EntryBlock->getTerminator());
1729     Builder.CreateStore(Val, Param);
1730     insertStoreParameter(Parameters, Param, Index);
1731     Index++;
1732   }
1733 
1734   int NumVars = isl_space_dim(Kernel->space, isl_dim_param);
1735 
1736   for (long i = 0; i < NumVars; i++) {
1737     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
1738     Value *Val = IDToValue[Id];
1739     if (ValueMap.count(Val))
1740       Val = ValueMap[Val];
1741     isl_id_free(Id);
1742 
1743     if (Runtime == GPURuntime::OpenCL)
1744       ArgSizes[Index] = computeSizeInBytes(Val->getType());
1745 
1746     Instruction *Param =
1747         new AllocaInst(Val->getType(), AddressSpace,
1748                        Launch + "_param_" + std::to_string(Index),
1749                        EntryBlock->getTerminator());
1750     Builder.CreateStore(Val, Param);
1751     insertStoreParameter(Parameters, Param, Index);
1752     Index++;
1753   }
1754 
1755   for (auto Val : SubtreeValues) {
1756     if (Runtime == GPURuntime::OpenCL)
1757       ArgSizes[Index] = computeSizeInBytes(Val->getType());
1758 
1759     Instruction *Param =
1760         new AllocaInst(Val->getType(), AddressSpace,
1761                        Launch + "_param_" + std::to_string(Index),
1762                        EntryBlock->getTerminator());
1763     Builder.CreateStore(Val, Param);
1764     insertStoreParameter(Parameters, Param, Index);
1765     Index++;
1766   }
1767 
1768   if (Runtime == GPURuntime::OpenCL) {
1769     for (int i = 0; i < NumArgs; i++) {
1770       Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
1771       Instruction *Param =
1772           new AllocaInst(Builder.getInt32Ty(), AddressSpace,
1773                          Launch + "_param_size_" + std::to_string(i),
1774                          EntryBlock->getTerminator());
1775       Builder.CreateStore(Val, Param);
1776       insertStoreParameter(Parameters, Param, Index);
1777       Index++;
1778     }
1779   }
1780 
1781   auto Location = EntryBlock->getTerminator();
1782   return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
1783                          Launch + "_params_i8ptr", Location);
1784 }
1785 
1786 void GPUNodeBuilder::setupKernelSubtreeFunctions(
1787     SetVector<Function *> SubtreeFunctions) {
1788   for (auto Fn : SubtreeFunctions) {
1789     const std::string ClonedFnName = Fn->getName().str();
1790     Function *Clone = GPUModule->getFunction(ClonedFnName);
1791     if (!Clone)
1792       Clone =
1793           Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
1794                            ClonedFnName, GPUModule.get());
1795     assert(Clone && "Expected cloned function to be initialized.");
1796     assert(ValueMap.find(Fn) == ValueMap.end() &&
1797            "Fn already present in ValueMap");
1798     ValueMap[Fn] = Clone;
1799   }
1800 }
1801 void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
1802   isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
1803   ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
1804   isl_id_free(Id);
1805   isl_ast_node_free(KernelStmt);
1806 
1807   if (Kernel->n_grid > 1)
1808     DeepestParallel = std::max(
1809         DeepestParallel, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));
1810   else
1811     DeepestSequential = std::max(
1812         DeepestSequential, (unsigned)isl_space_dim(Kernel->space, isl_dim_set));
1813 
1814   Value *BlockDimX, *BlockDimY, *BlockDimZ;
1815   std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);
1816 
1817   SetVector<Value *> SubtreeValues;
1818   SetVector<Function *> SubtreeFunctions;
1819   SetVector<const Loop *> Loops;
1820   isl::space ParamSpace;
1821   std::tie(SubtreeValues, SubtreeFunctions, Loops, ParamSpace) =
1822       getReferencesInKernel(Kernel);
1823 
1824   // Add parameters that appear only in the access function to the kernel
1825   // space. This is important to make sure that all isl_ids are passed as
1826   // parameters to the kernel, even though we may not have all parameters
1827   // in the context to improve compile time.
1828   Kernel->space = isl_space_align_params(Kernel->space, ParamSpace.release());
1829 
1830   assert(Kernel->tree && "Device AST of kernel node is empty");
1831 
1832   Instruction &HostInsertPoint = *Builder.GetInsertPoint();
1833   IslExprBuilder::IDToValueTy HostIDs = IDToValue;
1834   ValueMapT HostValueMap = ValueMap;
1835   BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
1836   ScalarMap.clear();
1837   BlockGenerator::EscapeUsersAllocaMapTy HostEscapeMap = EscapeMap;
1838   EscapeMap.clear();
1839 
1840   // Create for all loops we depend on values that contain the current loop
1841   // iteration. These values are necessary to generate code for SCEVs that
1842   // depend on such loops. As a result we need to pass them to the subfunction.
1843   for (const Loop *L : Loops) {
1844     const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
1845                                             SE.getUnknown(Builder.getInt64(1)),
1846                                             L, SCEV::FlagAnyWrap);
1847     Value *V = generateSCEV(OuterLIV);
1848     OutsideLoopIterations[L] = SE.getUnknown(V);
1849     SubtreeValues.insert(V);
1850   }
1851 
1852   createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
1853   setupKernelSubtreeFunctions(SubtreeFunctions);
1854 
1855   create(isl_ast_node_copy(Kernel->tree));
1856 
1857   finalizeKernelArguments(Kernel);
1858   Function *F = Builder.GetInsertBlock()->getParent();
1859   if (Arch == GPUArch::NVPTX64)
1860     addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
1861   clearDominators(F);
1862   clearScalarEvolution(F);
1863   clearLoops(F);
1864 
1865   IDToValue = HostIDs;
1866 
1867   ValueMap = std::move(HostValueMap);
1868   ScalarMap = std::move(HostScalarMap);
1869   EscapeMap = std::move(HostEscapeMap);
1870   IDToSAI.clear();
1871   Annotator.resetAlternativeAliasBases();
1872   for (auto &BasePtr : LocalArrays)
1873     S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
1874   LocalArrays.clear();
1875 
1876   std::string ASMString = finalizeKernelFunction();
1877   Builder.SetInsertPoint(&HostInsertPoint);
1878   Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);
1879 
1880   std::string Name = getKernelFuncName(Kernel->id);
1881   Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
1882   Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
1883   Value *GPUKernel = createCallGetKernel(KernelString, NameString);
1884 
1885   Value *GridDimX, *GridDimY;
1886   std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);
1887 
1888   createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
1889                          BlockDimZ, Parameters);
1890   createCallFreeKernel(GPUKernel);
1891 
1892   for (auto Id : KernelIds)
1893     isl_id_free(Id);
1894 
1895   KernelIds.clear();
1896 }
1897 
1898 /// Compute the DataLayout string for the NVPTX backend.
1899 ///
1900 /// @param is64Bit Are we looking for a 64 bit architecture?
1901 static std::string computeNVPTXDataLayout(bool is64Bit) {
1902   std::string Ret = "";
1903 
1904   if (!is64Bit) {
1905     Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1906            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1907            "64-v128:128:128-n16:32:64";
1908   } else {
1909     Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1910            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1911            "64-v128:128:128-n16:32:64";
1912   }
1913 
1914   return Ret;
1915 }
1916 
1917 /// Compute the DataLayout string for a SPIR kernel.
1918 ///
1919 /// @param is64Bit Are we looking for a 64 bit architecture?
1920 static std::string computeSPIRDataLayout(bool is64Bit) {
1921   std::string Ret = "";
1922 
1923   if (!is64Bit) {
1924     Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1925            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
1926            "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
1927            "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
1928   } else {
1929     Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1930            "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
1931            "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
1932            "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
1933   }
1934 
1935   return Ret;
1936 }
1937 
1938 Function *
1939 GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
1940                                          SetVector<Value *> &SubtreeValues) {
1941   std::vector<Type *> Args;
1942   std::string Identifier = getKernelFuncName(Kernel->id);
1943 
1944   std::vector<Metadata *> MemoryType;
1945 
1946   for (long i = 0; i < Prog->n_array; i++) {
1947     if (!ppcg_kernel_requires_array_argument(Kernel, i))
1948       continue;
1949 
1950     if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
1951       isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
1952       const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
1953       Args.push_back(SAI->getElementType());
1954       MemoryType.push_back(
1955           ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1956     } else {
1957       static const int UseGlobalMemory = 1;
1958       Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
1959       MemoryType.push_back(
1960           ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
1961     }
1962   }
1963 
1964   int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);
1965 
1966   for (long i = 0; i < NumHostIters; i++) {
1967     Args.push_back(Builder.getInt64Ty());
1968     MemoryType.push_back(
1969         ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1970   }
1971 
1972   int NumVars = isl_space_dim(Kernel->space, isl_dim_param);
1973 
1974   for (long i = 0; i < NumVars; i++) {
1975     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
1976     Value *Val = IDToValue[Id];
1977     isl_id_free(Id);
1978     Args.push_back(Val->getType());
1979     MemoryType.push_back(
1980         ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1981   }
1982 
1983   for (auto *V : SubtreeValues) {
1984     Args.push_back(V->getType());
1985     MemoryType.push_back(
1986         ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
1987   }
1988 
1989   auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
1990   auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
1991                               GPUModule.get());
1992 
1993   std::vector<Metadata *> EmptyStrings;
1994 
1995   for (unsigned int i = 0; i < MemoryType.size(); i++) {
1996     EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
1997   }
1998 
1999   if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
2000     FN->setMetadata("kernel_arg_addr_space",
2001                     MDNode::get(FN->getContext(), MemoryType));
2002     FN->setMetadata("kernel_arg_name",
2003                     MDNode::get(FN->getContext(), EmptyStrings));
2004     FN->setMetadata("kernel_arg_access_qual",
2005                     MDNode::get(FN->getContext(), EmptyStrings));
2006     FN->setMetadata("kernel_arg_type",
2007                     MDNode::get(FN->getContext(), EmptyStrings));
2008     FN->setMetadata("kernel_arg_type_qual",
2009                     MDNode::get(FN->getContext(), EmptyStrings));
2010     FN->setMetadata("kernel_arg_base_type",
2011                     MDNode::get(FN->getContext(), EmptyStrings));
2012   }
2013 
2014   switch (Arch) {
2015   case GPUArch::NVPTX64:
2016     FN->setCallingConv(CallingConv::PTX_Kernel);
2017     break;
2018   case GPUArch::SPIR32:
2019   case GPUArch::SPIR64:
2020     FN->setCallingConv(CallingConv::SPIR_KERNEL);
2021     break;
2022   }
2023 
2024   auto Arg = FN->arg_begin();
2025   for (long i = 0; i < Kernel->n_array; i++) {
2026     if (!ppcg_kernel_requires_array_argument(Kernel, i))
2027       continue;
2028 
2029     Arg->setName(Kernel->array[i].array->name);
2030 
2031     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2032     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2033     Type *EleTy = SAI->getElementType();
2034     Value *Val = &*Arg;
2035     SmallVector<const SCEV *, 4> Sizes;
2036     isl_ast_build *Build =
2037         isl_ast_build_from_context(isl_set_copy(Prog->context));
2038     Sizes.push_back(nullptr);
2039     for (long j = 1, n = Kernel->array[i].array->n_index; j < n; j++) {
2040       isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
2041           Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
2042       auto V = ExprBuilder.create(DimSize);
2043       Sizes.push_back(SE.getSCEV(V));
2044     }
2045     const ScopArrayInfo *SAIRep =
2046         S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
2047     LocalArrays.push_back(Val);
2048 
2049     isl_ast_build_free(Build);
2050     KernelIds.push_back(Id);
2051     IDToSAI[Id] = SAIRep;
2052     Arg++;
2053   }
2054 
2055   for (long i = 0; i < NumHostIters; i++) {
2056     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
2057     Arg->setName(isl_id_get_name(Id));
2058     IDToValue[Id] = &*Arg;
2059     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2060     Arg++;
2061   }
2062 
2063   for (long i = 0; i < NumVars; i++) {
2064     isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
2065     Arg->setName(isl_id_get_name(Id));
2066     Value *Val = IDToValue[Id];
2067     ValueMap[Val] = &*Arg;
2068     IDToValue[Id] = &*Arg;
2069     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2070     Arg++;
2071   }
2072 
2073   for (auto *V : SubtreeValues) {
2074     Arg->setName(V->getName());
2075     ValueMap[V] = &*Arg;
2076     Arg++;
2077   }
2078 
2079   return FN;
2080 }
2081 
2082 void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
2083   Intrinsic::ID IntrinsicsBID[2];
2084   Intrinsic::ID IntrinsicsTID[3];
2085 
2086   switch (Arch) {
2087   case GPUArch::SPIR64:
2088   case GPUArch::SPIR32:
2089     llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
2090   case GPUArch::NVPTX64:
2091     IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
2092     IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;
2093 
2094     IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
2095     IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
2096     IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
2097     break;
2098   }
2099 
2100   auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
2101     std::string Name = isl_id_get_name(Id);
2102     Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2103     Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
2104     Value *Val = Builder.CreateCall(IntrinsicFn, {});
2105     Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
2106     IDToValue[Id] = Val;
2107     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2108   };
2109 
2110   for (int i = 0; i < Kernel->n_grid; ++i) {
2111     isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
2112     addId(Id, IntrinsicsBID[i]);
2113   }
2114 
2115   for (int i = 0; i < Kernel->n_block; ++i) {
2116     isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
2117     addId(Id, IntrinsicsTID[i]);
2118   }
2119 }
2120 
2121 void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel,
2122                                            bool SizeTypeIs64bit) {
2123   const char *GroupName[3] = {"__gen_ocl_get_group_id0",
2124                               "__gen_ocl_get_group_id1",
2125                               "__gen_ocl_get_group_id2"};
2126 
2127   const char *LocalName[3] = {"__gen_ocl_get_local_id0",
2128                               "__gen_ocl_get_local_id1",
2129                               "__gen_ocl_get_local_id2"};
2130   IntegerType *SizeT =
2131       SizeTypeIs64bit ? Builder.getInt64Ty() : Builder.getInt32Ty();
2132 
2133   auto createFunc = [this](const char *Name, __isl_take isl_id *Id,
2134                            IntegerType *SizeT) mutable {
2135     Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2136     Function *FN = M->getFunction(Name);
2137 
2138     // If FN is not available, declare it.
2139     if (!FN) {
2140       GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
2141       std::vector<Type *> Args;
2142       FunctionType *Ty = FunctionType::get(SizeT, Args, false);
2143       FN = Function::Create(Ty, Linkage, Name, M);
2144       FN->setCallingConv(CallingConv::SPIR_FUNC);
2145     }
2146 
2147     Value *Val = Builder.CreateCall(FN, {});
2148     if (SizeT == Builder.getInt32Ty())
2149       Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
2150     IDToValue[Id] = Val;
2151     KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
2152   };
2153 
2154   for (int i = 0; i < Kernel->n_grid; ++i)
2155     createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i), SizeT);
2156 
2157   for (int i = 0; i < Kernel->n_block; ++i)
2158     createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i), SizeT);
2159 }
2160 
2161 void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
2162   auto Arg = FN->arg_begin();
2163   for (long i = 0; i < Kernel->n_array; i++) {
2164     if (!ppcg_kernel_requires_array_argument(Kernel, i))
2165       continue;
2166 
2167     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2168     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2169     isl_id_free(Id);
2170 
2171     if (SAI->getNumberOfDimensions() > 0) {
2172       Arg++;
2173       continue;
2174     }
2175 
2176     Value *Val = &*Arg;
2177 
2178     if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
2179       Type *TypePtr = SAI->getElementType()->getPointerTo();
2180       Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
2181       Val = Builder.CreateLoad(SAI->getElementType(), TypedArgPtr);
2182     }
2183 
2184     Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
2185     Builder.CreateStore(Val, Alloca);
2186 
2187     Arg++;
2188   }
2189 }
2190 
2191 void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
2192   auto *FN = Builder.GetInsertBlock()->getParent();
2193   auto Arg = FN->arg_begin();
2194 
2195   bool StoredScalar = false;
2196   for (long i = 0; i < Kernel->n_array; i++) {
2197     if (!ppcg_kernel_requires_array_argument(Kernel, i))
2198       continue;
2199 
2200     isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
2201     const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id));
2202     isl_id_free(Id);
2203 
2204     if (SAI->getNumberOfDimensions() > 0) {
2205       Arg++;
2206       continue;
2207     }
2208 
2209     if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
2210       Arg++;
2211       continue;
2212     }
2213 
2214     Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
2215     Value *ArgPtr = &*Arg;
2216     Type *TypePtr = SAI->getElementType()->getPointerTo();
2217     Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
2218     Value *Val = Builder.CreateLoad(SAI->getElementType(), Alloca);
2219     Builder.CreateStore(Val, TypedArgPtr);
2220     StoredScalar = true;
2221 
2222     Arg++;
2223   }
2224 
2225   if (StoredScalar) {
2226     /// In case more than one thread contains scalar stores, the generated
2227     /// code might be incorrect, if we only store at the end of the kernel.
2228     /// To support this case we need to store these scalars back at each
2229     /// memory store or at least before each kernel barrier.
2230     if (Kernel->n_block != 0 || Kernel->n_grid != 0) {
2231       BuildSuccessful = 0;
2232       LLVM_DEBUG(
2233           dbgs() << getUniqueScopName(&S)
2234                  << " has a store to a scalar value that"
2235                     " would be undefined to run in parallel. Bailing out.\n";);
2236     }
2237   }
2238 }
2239 
2240 void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
2241   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
2242 
2243   for (int i = 0; i < Kernel->n_var; ++i) {
2244     struct ppcg_kernel_var &Var = Kernel->var[i];
2245     isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
2246     Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();
2247 
2248     Type *ArrayTy = EleTy;
2249     SmallVector<const SCEV *, 4> Sizes;
2250 
2251     Sizes.push_back(nullptr);
2252     for (unsigned int j = 1; j < Var.array->n_index; ++j) {
2253       isl_val *Val = isl_vec_get_element_val(Var.size, j);
2254       long Bound = isl_val_get_num_si(Val);
2255       isl_val_free(Val);
2256       Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
2257     }
2258 
2259     for (int j = Var.array->n_index - 1; j >= 0; --j) {
2260       isl_val *Val = isl_vec_get_element_val(Var.size, j);
2261       long Bound = isl_val_get_num_si(Val);
2262       isl_val_free(Val);
2263       ArrayTy = ArrayType::get(ArrayTy, Bound);
2264     }
2265 
2266     const ScopArrayInfo *SAI;
2267     Value *Allocation;
2268     if (Var.type == ppcg_access_shared) {
2269       auto GlobalVar = new GlobalVariable(
2270           *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
2271           nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
2272       GlobalVar->setAlignment(llvm::Align(EleTy->getPrimitiveSizeInBits() / 8));
2273       GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));
2274 
2275       Allocation = GlobalVar;
2276     } else if (Var.type == ppcg_access_private) {
2277       Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
2278     } else {
2279       llvm_unreachable("unknown variable type");
2280     }
2281     SAI =
2282         S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
2283     Id = isl_id_alloc(S.getIslCtx().get(), Var.name, nullptr);
2284     IDToValue[Id] = Allocation;
2285     LocalArrays.push_back(Allocation);
2286     KernelIds.push_back(Id);
2287     IDToSAI[Id] = SAI;
2288   }
2289 }
2290 
2291 void GPUNodeBuilder::createKernelFunction(
2292     ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
2293     SetVector<Function *> &SubtreeFunctions) {
2294   std::string Identifier = getKernelFuncName(Kernel->id);
2295   GPUModule.reset(new Module(Identifier, Builder.getContext()));
2296 
2297   switch (Arch) {
2298   case GPUArch::NVPTX64:
2299     if (Runtime == GPURuntime::CUDA)
2300       GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
2301     else if (Runtime == GPURuntime::OpenCL)
2302       GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
2303     GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
2304     break;
2305   case GPUArch::SPIR32:
2306     GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
2307     GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
2308     break;
2309   case GPUArch::SPIR64:
2310     GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
2311     GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
2312     break;
2313   }
2314 
2315   Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);
2316 
2317   BasicBlock *PrevBlock = Builder.GetInsertBlock();
2318   auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);
2319 
2320   DT.addNewBlock(EntryBlock, PrevBlock);
2321 
2322   Builder.SetInsertPoint(EntryBlock);
2323   Builder.CreateRetVoid();
2324   Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());
2325 
2326   ScopDetection::markFunctionAsInvalid(FN);
2327 
2328   prepareKernelArguments(Kernel, FN);
2329   createKernelVariables(Kernel, FN);
2330 
2331   switch (Arch) {
2332   case GPUArch::NVPTX64:
2333     insertKernelIntrinsics(Kernel);
2334     break;
2335   case GPUArch::SPIR32:
2336     insertKernelCallsSPIR(Kernel, false);
2337     break;
2338   case GPUArch::SPIR64:
2339     insertKernelCallsSPIR(Kernel, true);
2340     break;
2341   }
2342 }
2343 
2344 std::string GPUNodeBuilder::createKernelASM() {
2345   llvm::Triple GPUTriple;
2346 
2347   switch (Arch) {
2348   case GPUArch::NVPTX64:
2349     switch (Runtime) {
2350     case GPURuntime::CUDA:
2351       GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
2352       break;
2353     case GPURuntime::OpenCL:
2354       GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
2355       break;
2356     }
2357     break;
2358   case GPUArch::SPIR64:
2359   case GPUArch::SPIR32:
2360     std::string SPIRAssembly;
2361     raw_string_ostream IROstream(SPIRAssembly);
2362     IROstream << *GPUModule;
2363     IROstream.flush();
2364     return SPIRAssembly;
2365   }
2366 
2367   std::string ErrMsg;
2368   auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);
2369 
2370   if (!GPUTarget) {
2371     errs() << ErrMsg << "\n";
2372     return "";
2373   }
2374 
2375   TargetOptions Options;
2376   Options.UnsafeFPMath = FastMath;
2377 
2378   std::string subtarget;
2379 
2380   switch (Arch) {
2381   case GPUArch::NVPTX64:
2382     subtarget = CudaVersion;
2383     break;
2384   case GPUArch::SPIR32:
2385   case GPUArch::SPIR64:
2386     llvm_unreachable("No subtarget for SPIR architecture");
2387   }
2388 
2389   std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
2390       GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));
2391 
2392   SmallString<0> ASMString;
2393   raw_svector_ostream ASMStream(ASMString);
2394   llvm::legacy::PassManager PM;
2395 
2396   PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));
2397 
2398   if (TargetM->addPassesToEmitFile(PM, ASMStream, nullptr, CGFT_AssemblyFile,
2399                                    true /* verify */)) {
2400     errs() << "The target does not support generation of this file type!\n";
2401     return "";
2402   }
2403 
2404   PM.run(*GPUModule);
2405 
2406   return ASMStream.str().str();
2407 }
2408 
2409 bool GPUNodeBuilder::requiresCUDALibDevice() {
2410   bool RequiresLibDevice = false;
2411   for (Function &F : GPUModule->functions()) {
2412     if (!F.isDeclaration())
2413       continue;
2414 
2415     const std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(F.getName());
2416     if (CUDALibDeviceFunc.length() != 0) {
2417       // We need to handle the case where a module looks like this:
2418       // @expf(..)
2419       // @llvm.exp.f64(..)
2420       // Both of these functions would be renamed to `__nv_expf`.
2421       //
2422       // So, we must first check for the existence of the libdevice function.
2423       // If this exists, we replace our current function with it.
2424       //
2425       // If it does not exist, we rename the current function to the
2426       // libdevice functiono name.
2427       if (Function *Replacement = F.getParent()->getFunction(CUDALibDeviceFunc))
2428         F.replaceAllUsesWith(Replacement);
2429       else
2430         F.setName(CUDALibDeviceFunc);
2431       RequiresLibDevice = true;
2432     }
2433   }
2434 
2435   return RequiresLibDevice;
2436 }
2437 
2438 void GPUNodeBuilder::addCUDALibDevice() {
2439   if (Arch != GPUArch::NVPTX64)
2440     return;
2441 
2442   if (requiresCUDALibDevice()) {
2443     SMDiagnostic Error;
2444 
2445     errs() << CUDALibDevice << "\n";
2446     auto LibDeviceModule =
2447         parseIRFile(CUDALibDevice, Error, GPUModule->getContext());
2448 
2449     if (!LibDeviceModule) {
2450       BuildSuccessful = false;
2451       report_fatal_error("Could not find or load libdevice. Skipping GPU "
2452                          "kernel generation. Please set -polly-acc-libdevice "
2453                          "accordingly.\n");
2454       return;
2455     }
2456 
2457     Linker L(*GPUModule);
2458 
2459     // Set an nvptx64 target triple to avoid linker warnings. The original
2460     // triple of the libdevice files are nvptx-unknown-unknown.
2461     LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
2462     L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
2463   }
2464 }
2465 
2466 std::string GPUNodeBuilder::finalizeKernelFunction() {
2467 
2468   if (verifyModule(*GPUModule)) {
2469     LLVM_DEBUG(dbgs() << "verifyModule failed on module:\n";
2470                GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
2471     LLVM_DEBUG(dbgs() << "verifyModule Error:\n";
2472                verifyModule(*GPUModule, &dbgs()););
2473 
2474     if (FailOnVerifyModuleFailure)
2475       llvm_unreachable("VerifyModule failed.");
2476 
2477     BuildSuccessful = false;
2478     return "";
2479   }
2480 
2481   addCUDALibDevice();
2482 
2483   if (DumpKernelIR)
2484     outs() << *GPUModule << "\n";
2485 
2486   if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
2487     // Optimize module.
2488     llvm::legacy::PassManager OptPasses;
2489     PassManagerBuilder PassBuilder;
2490     PassBuilder.OptLevel = 3;
2491     PassBuilder.SizeLevel = 0;
2492     PassBuilder.populateModulePassManager(OptPasses);
2493     OptPasses.run(*GPUModule);
2494   }
2495 
2496   std::string Assembly = createKernelASM();
2497 
2498   if (DumpKernelASM)
2499     outs() << Assembly << "\n";
2500 
2501   GPUModule.release();
2502   KernelIDs.clear();
2503 
2504   return Assembly;
2505 }
2506 /// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`
2507 /// @param PwAffs The list of piecewise affine functions to create an
2508 ///               `isl_pw_aff_list` from. We expect an rvalue ref because
2509 ///               all the isl_pw_aff are used up by this function.
2510 ///
2511 /// @returns  The `isl_pw_aff_list`.
2512 __isl_give isl_pw_aff_list *
2513 createPwAffList(isl_ctx *Context,
2514                 const std::vector<__isl_take isl_pw_aff *> &&PwAffs) {
2515   isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size());
2516 
2517   for (unsigned i = 0; i < PwAffs.size(); i++) {
2518     List = isl_pw_aff_list_insert(List, i, PwAffs[i]);
2519   }
2520   return List;
2521 }
2522 
2523 /// Align all the `PwAffs` such that they have the same parameter dimensions.
2524 ///
2525 /// We loop over all `pw_aff` and align all of their spaces together to
2526 /// create a common space for all the `pw_aff`. This common space is the
2527 /// `AlignSpace`. We then align all the `pw_aff` to this space. We start
2528 /// with the given `SeedSpace`.
2529 /// @param PwAffs    The list of piecewise affine functions we want to align.
2530 ///                  This is an rvalue reference because the entire vector is
2531 ///                  used up by the end of the operation.
2532 /// @param SeedSpace The space to start the alignment process with.
2533 /// @returns         A std::pair, whose first element is the aligned space,
2534 ///                  whose second element is the vector of aligned piecewise
2535 ///                  affines.
2536 static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>>
2537 alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs,
2538             __isl_take isl_space *SeedSpace) {
2539   assert(SeedSpace && "Invalid seed space given.");
2540 
2541   isl_space *AlignSpace = SeedSpace;
2542   for (isl_pw_aff *PwAff : PwAffs) {
2543     isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff);
2544     AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace);
2545   }
2546   std::vector<isl_pw_aff *> AdjustedPwAffs;
2547 
2548   for (unsigned i = 0; i < PwAffs.size(); i++) {
2549     isl_pw_aff *Adjusted = PwAffs[i];
2550     assert(Adjusted && "Invalid pw_aff given.");
2551     Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace));
2552     AdjustedPwAffs.push_back(Adjusted);
2553   }
2554   return std::make_pair(AlignSpace, AdjustedPwAffs);
2555 }
2556 
2557 namespace {
2558 class PPCGCodeGeneration : public ScopPass {
2559 public:
2560   static char ID;
2561 
2562   GPURuntime Runtime = GPURuntime::CUDA;
2563 
2564   GPUArch Architecture = GPUArch::NVPTX64;
2565 
2566   /// The scop that is currently processed.
2567   Scop *S;
2568 
2569   LoopInfo *LI;
2570   DominatorTree *DT;
2571   ScalarEvolution *SE;
2572   const DataLayout *DL;
2573   RegionInfo *RI;
2574 
2575   PPCGCodeGeneration() : ScopPass(ID) {
2576     // Apply defaults.
2577     Runtime = GPURuntimeChoice;
2578     Architecture = GPUArchChoice;
2579   }
2580 
2581   /// Construct compilation options for PPCG.
2582   ///
2583   /// @returns The compilation options.
2584   ppcg_options *createPPCGOptions() {
2585     auto DebugOptions =
2586         (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
2587     auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));
2588 
2589     DebugOptions->dump_schedule_constraints = false;
2590     DebugOptions->dump_schedule = false;
2591     DebugOptions->dump_final_schedule = false;
2592     DebugOptions->dump_sizes = false;
2593     DebugOptions->verbose = false;
2594 
2595     Options->debug = DebugOptions;
2596 
2597     Options->group_chains = false;
2598     Options->reschedule = true;
2599     Options->scale_tile_loops = false;
2600     Options->wrap = false;
2601 
2602     Options->non_negative_parameters = false;
2603     Options->ctx = nullptr;
2604     Options->sizes = nullptr;
2605 
2606     Options->tile = true;
2607     Options->tile_size = 32;
2608 
2609     Options->isolate_full_tiles = false;
2610 
2611     Options->use_private_memory = PrivateMemory;
2612     Options->use_shared_memory = SharedMemory;
2613     Options->max_shared_memory = 48 * 1024;
2614 
2615     Options->target = PPCG_TARGET_CUDA;
2616     Options->openmp = false;
2617     Options->linearize_device_arrays = true;
2618     Options->allow_gnu_extensions = false;
2619 
2620     Options->unroll_copy_shared = false;
2621     Options->unroll_gpu_tile = false;
2622     Options->live_range_reordering = true;
2623 
2624     Options->live_range_reordering = true;
2625     Options->hybrid = false;
2626     Options->opencl_compiler_options = nullptr;
2627     Options->opencl_use_gpu = false;
2628     Options->opencl_n_include_file = 0;
2629     Options->opencl_include_files = nullptr;
2630     Options->opencl_print_kernel_types = false;
2631     Options->opencl_embed_kernel_code = false;
2632 
2633     Options->save_schedule_file = nullptr;
2634     Options->load_schedule_file = nullptr;
2635 
2636     return Options;
2637   }
2638 
2639   /// Get a tagged access relation containing all accesses of type @p AccessTy.
2640   ///
2641   /// Instead of a normal access of the form:
2642   ///
2643   ///   Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
2644   ///
2645   /// a tagged access has the form
2646   ///
2647   ///   [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
2648   ///
2649   /// where 'id' is an additional space that references the memory access that
2650   /// triggered the access.
2651   ///
2652   /// @param AccessTy The type of the memory accesses to collect.
2653   ///
2654   /// @return The relation describing all tagged memory accesses.
2655   isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
2656     isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace().release());
2657 
2658     for (auto &Stmt : *S)
2659       for (auto &Acc : Stmt)
2660         if (Acc->getType() == AccessTy) {
2661           isl_map *Relation = Acc->getAccessRelation().release();
2662           Relation =
2663               isl_map_intersect_domain(Relation, Stmt.getDomain().release());
2664 
2665           isl_space *Space = isl_map_get_space(Relation);
2666           Space = isl_space_range(Space);
2667           Space = isl_space_from_range(Space);
2668           Space =
2669               isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
2670           isl_map *Universe = isl_map_universe(Space);
2671           Relation = isl_map_domain_product(Relation, Universe);
2672           Accesses = isl_union_map_add_map(Accesses, Relation);
2673         }
2674 
2675     return Accesses;
2676   }
2677 
2678   /// Get the set of all read accesses, tagged with the access id.
2679   ///
2680   /// @see getTaggedAccesses
2681   isl_union_map *getTaggedReads() {
2682     return getTaggedAccesses(MemoryAccess::READ);
2683   }
2684 
2685   /// Get the set of all may (and must) accesses, tagged with the access id.
2686   ///
2687   /// @see getTaggedAccesses
2688   isl_union_map *getTaggedMayWrites() {
2689     return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
2690                                getTaggedAccesses(MemoryAccess::MUST_WRITE));
2691   }
2692 
2693   /// Get the set of all must accesses, tagged with the access id.
2694   ///
2695   /// @see getTaggedAccesses
2696   isl_union_map *getTaggedMustWrites() {
2697     return getTaggedAccesses(MemoryAccess::MUST_WRITE);
2698   }
2699 
2700   /// Collect parameter and array names as isl_ids.
2701   ///
2702   /// To reason about the different parameters and arrays used, ppcg requires
2703   /// a list of all isl_ids in use. As PPCG traditionally performs
2704   /// source-to-source compilation each of these isl_ids is mapped to the
2705   /// expression that represents it. As we do not have a corresponding
2706   /// expression in Polly, we just map each id to a 'zero' expression to match
2707   /// the data format that ppcg expects.
2708   ///
2709   /// @returns Retun a map from collected ids to 'zero' ast expressions.
2710   __isl_give isl_id_to_ast_expr *getNames() {
2711     auto *Names = isl_id_to_ast_expr_alloc(
2712         S->getIslCtx().get(),
2713         S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
2714     auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx().get()));
2715 
2716     for (const SCEV *P : S->parameters()) {
2717       isl_id *Id = S->getIdForParam(P).release();
2718       Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
2719     }
2720 
2721     for (auto &Array : S->arrays()) {
2722       auto Id = Array->getBasePtrId().release();
2723       Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
2724     }
2725 
2726     isl_ast_expr_free(Zero);
2727 
2728     return Names;
2729   }
2730 
2731   /// Create a new PPCG scop from the current scop.
2732   ///
2733   /// The PPCG scop is initialized with data from the current polly::Scop. From
2734   /// this initial data, the data-dependences in the PPCG scop are initialized.
2735   /// We do not use Polly's dependence analysis for now, to ensure we match
2736   /// the PPCG default behaviour more closely.
2737   ///
2738   /// @returns A new ppcg scop.
2739   ppcg_scop *createPPCGScop() {
2740     MustKillsInfo KillsInfo = computeMustKillsInfo(*S);
2741 
2742     auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));
2743 
2744     PPCGScop->options = createPPCGOptions();
2745     // enable live range reordering
2746     PPCGScop->options->live_range_reordering = 1;
2747 
2748     PPCGScop->start = 0;
2749     PPCGScop->end = 0;
2750 
2751     PPCGScop->context = S->getContext().release();
2752     PPCGScop->domain = S->getDomains().release();
2753     // TODO: investigate this further. PPCG calls collect_call_domains.
2754     PPCGScop->call = isl_union_set_from_set(S->getContext().release());
2755     PPCGScop->tagged_reads = getTaggedReads();
2756     PPCGScop->reads = S->getReads().release();
2757     PPCGScop->live_in = nullptr;
2758     PPCGScop->tagged_may_writes = getTaggedMayWrites();
2759     PPCGScop->may_writes = S->getWrites().release();
2760     PPCGScop->tagged_must_writes = getTaggedMustWrites();
2761     PPCGScop->must_writes = S->getMustWrites().release();
2762     PPCGScop->live_out = nullptr;
2763     PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.release();
2764     PPCGScop->must_kills = KillsInfo.MustKills.release();
2765 
2766     PPCGScop->tagger = nullptr;
2767     PPCGScop->independence =
2768         isl_union_map_empty(isl_set_get_space(PPCGScop->context));
2769     PPCGScop->dep_flow = nullptr;
2770     PPCGScop->tagged_dep_flow = nullptr;
2771     PPCGScop->dep_false = nullptr;
2772     PPCGScop->dep_forced = nullptr;
2773     PPCGScop->dep_order = nullptr;
2774     PPCGScop->tagged_dep_order = nullptr;
2775 
2776     PPCGScop->schedule = S->getScheduleTree().release();
2777     // If we have something non-trivial to kill, add it to the schedule
2778     if (KillsInfo.KillsSchedule.get())
2779       PPCGScop->schedule = isl_schedule_sequence(
2780           PPCGScop->schedule, KillsInfo.KillsSchedule.release());
2781 
2782     PPCGScop->names = getNames();
2783     PPCGScop->pet = nullptr;
2784 
2785     compute_tagger(PPCGScop);
2786     compute_dependences(PPCGScop);
2787     eliminate_dead_code(PPCGScop);
2788 
2789     return PPCGScop;
2790   }
2791 
2792   /// Collect the array accesses in a statement.
2793   ///
2794   /// @param Stmt The statement for which to collect the accesses.
2795   ///
2796   /// @returns A list of array accesses.
2797   gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
2798     gpu_stmt_access *Accesses = nullptr;
2799 
2800     for (MemoryAccess *Acc : Stmt) {
2801       auto Access =
2802           isl_alloc_type(S->getIslCtx().get(), struct gpu_stmt_access);
2803       Access->read = Acc->isRead();
2804       Access->write = Acc->isWrite();
2805       Access->access = Acc->getAccessRelation().release();
2806       isl_space *Space = isl_map_get_space(Access->access);
2807       Space = isl_space_range(Space);
2808       Space = isl_space_from_range(Space);
2809       Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
2810       isl_map *Universe = isl_map_universe(Space);
2811       Access->tagged_access =
2812           isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
2813       Access->exact_write = !Acc->isMayWrite();
2814       Access->ref_id = Acc->getId().release();
2815       Access->next = Accesses;
2816       Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
2817       // TODO: Also mark one-element accesses to arrays as fixed-element.
2818       Access->fixed_element =
2819           Acc->isLatestScalarKind() ? isl_bool_true : isl_bool_false;
2820       Accesses = Access;
2821     }
2822 
2823     return Accesses;
2824   }
2825 
2826   /// Collect the list of GPU statements.
2827   ///
2828   /// Each statement has an id, a pointer to the underlying data structure,
2829   /// as well as a list with all memory accesses.
2830   ///
2831   /// TODO: Initialize the list of memory accesses.
2832   ///
2833   /// @returns A linked-list of statements.
2834   gpu_stmt *getStatements() {
2835     gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx().get(), struct gpu_stmt,
2836                                        std::distance(S->begin(), S->end()));
2837 
2838     int i = 0;
2839     for (auto &Stmt : *S) {
2840       gpu_stmt *GPUStmt = &Stmts[i];
2841 
2842       GPUStmt->id = Stmt.getDomainId().release();
2843 
2844       // We use the pet stmt pointer to keep track of the Polly statements.
2845       GPUStmt->stmt = (pet_stmt *)&Stmt;
2846       GPUStmt->accesses = getStmtAccesses(Stmt);
2847       i++;
2848     }
2849 
2850     return Stmts;
2851   }
2852 
2853   /// Derive the extent of an array.
2854   ///
2855   /// The extent of an array is the set of elements that are within the
2856   /// accessed array. For the inner dimensions, the extent constraints are
2857   /// 0 and the size of the corresponding array dimension. For the first
2858   /// (outermost) dimension, the extent constraints are the minimal and maximal
2859   /// subscript value for the first dimension.
2860   ///
2861   /// @param Array The array to derive the extent for.
2862   ///
2863   /// @returns An isl_set describing the extent of the array.
2864   isl::set getExtent(ScopArrayInfo *Array) {
2865     unsigned NumDims = Array->getNumberOfDimensions();
2866 
2867     if (Array->getNumberOfDimensions() == 0)
2868       return isl::set::universe(Array->getSpace());
2869 
2870     isl::union_map Accesses = S->getAccesses(Array);
2871     isl::union_set AccessUSet = Accesses.range();
2872     AccessUSet = AccessUSet.coalesce();
2873     AccessUSet = AccessUSet.detect_equalities();
2874     AccessUSet = AccessUSet.coalesce();
2875 
2876     if (AccessUSet.is_empty())
2877       return isl::set::empty(Array->getSpace());
2878 
2879     isl::set AccessSet = AccessUSet.extract_set(Array->getSpace());
2880 
2881     isl::local_space LS = isl::local_space(Array->getSpace());
2882 
2883     isl::pw_aff Val = isl::aff::var_on_domain(LS, isl::dim::set, 0);
2884     isl::pw_aff OuterMin = AccessSet.dim_min(0);
2885     isl::pw_aff OuterMax = AccessSet.dim_max(0);
2886     OuterMin = OuterMin.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2887     OuterMax = OuterMax.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2888     OuterMin = OuterMin.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2889     OuterMax = OuterMax.set_tuple_id(isl::dim::in, Array->getBasePtrId());
2890 
2891     isl::set Extent = isl::set::universe(Array->getSpace());
2892 
2893     Extent = Extent.intersect(OuterMin.le_set(Val));
2894     Extent = Extent.intersect(OuterMax.ge_set(Val));
2895 
2896     for (unsigned i = 1; i < NumDims; ++i)
2897       Extent = Extent.lower_bound_si(isl::dim::set, i, 0);
2898 
2899     for (unsigned i = 0; i < NumDims; ++i) {
2900       isl::pw_aff PwAff = Array->getDimensionSizePw(i);
2901 
2902       // isl_pw_aff can be NULL for zero dimension. Only in the case of a
2903       // Fortran array will we have a legitimate dimension.
2904       if (PwAff.is_null()) {
2905         assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
2906         continue;
2907       }
2908 
2909       isl::pw_aff Val = isl::aff::var_on_domain(
2910           isl::local_space(Array->getSpace()), isl::dim::set, i);
2911       PwAff = PwAff.add_dims(isl::dim::in, Val.dim(isl::dim::in));
2912       PwAff = PwAff.set_tuple_id(isl::dim::in, Val.get_tuple_id(isl::dim::in));
2913       isl::set Set = PwAff.gt_set(Val);
2914       Extent = Set.intersect(Extent);
2915     }
2916 
2917     return Extent;
2918   }
2919 
2920   /// Derive the bounds of an array.
2921   ///
2922   /// For the first dimension we derive the bound of the array from the extent
2923   /// of this dimension. For inner dimensions we obtain their size directly from
2924   /// ScopArrayInfo.
2925   ///
2926   /// @param PPCGArray The array to compute bounds for.
2927   /// @param Array The polly array from which to take the information.
2928   void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
2929     std::vector<isl_pw_aff *> Bounds;
2930 
2931     if (PPCGArray.n_index > 0) {
2932       if (isl_set_is_empty(PPCGArray.extent)) {
2933         isl_set *Dom = isl_set_copy(PPCGArray.extent);
2934         isl_local_space *LS = isl_local_space_from_space(
2935             isl_space_params(isl_set_get_space(Dom)));
2936         isl_set_free(Dom);
2937         isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
2938         Bounds.push_back(Zero);
2939       } else {
2940         isl_set *Dom = isl_set_copy(PPCGArray.extent);
2941         Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
2942         isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
2943         isl_set_free(Dom);
2944         Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
2945         isl_local_space *LS =
2946             isl_local_space_from_space(isl_set_get_space(Dom));
2947         isl_aff *One = isl_aff_zero_on_domain(LS);
2948         One = isl_aff_add_constant_si(One, 1);
2949         Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
2950         Bound = isl_pw_aff_gist(Bound, S->getContext().release());
2951         Bounds.push_back(Bound);
2952       }
2953     }
2954 
2955     for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
2956       isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
2957       auto LS = isl_pw_aff_get_domain_space(Bound);
2958       auto Aff = isl_multi_aff_zero(LS);
2959 
2960       // We need types to work out, which is why we perform this weird dance
2961       // with `Aff` and `Bound`. Consider this example:
2962 
2963       // LS: [p] -> { [] }
2964       // Zero: [p] -> { [] } | Implicitly, is [p] -> { ~ -> [] }.
2965       // This `~` is used to denote a "null space" (which is different from
2966       // a *zero dimensional* space), which is something that ISL does not
2967       // show you when pretty printing.
2968 
2969       // Bound: [p] -> { [] -> [(10p)] } | Here, the [] is a *zero dimensional*
2970       // space, not a "null space" which does not exist at all.
2971 
2972       // When we pullback (precompose) `Bound` with `Zero`, we get:
2973       // Bound . Zero =
2974       //     ([p] -> { [] -> [(10p)] }) . ([p] -> {~ -> [] }) =
2975       //     [p] -> { ~ -> [(10p)] } =
2976       //     [p] -> [(10p)] (as ISL pretty prints it)
2977       // Bound Pullback: [p] -> { [(10p)] }
2978 
2979       // We want this kind of an expression for Bound, without a
2980       // zero dimensional input, but with a "null space" input for the types
2981       // to work out later on, as far as I (Siddharth Bhat) understand.
2982       // I was unable to find a reference to this in the ISL manual.
2983       // References: Tobias Grosser.
2984 
2985       Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
2986       Bounds.push_back(Bound);
2987     }
2988 
2989     /// To construct a `isl_multi_pw_aff`, we need all the indivisual `pw_aff`
2990     /// to have the same parameter dimensions. So, we need to align them to an
2991     /// appropriate space.
2992     /// Scop::Context is _not_ an appropriate space, because when we have
2993     /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not
2994     /// contain all parameter dimensions.
2995     /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together.
2996     isl_space *SeedAlignSpace = S->getParamSpace().release();
2997     SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1);
2998 
2999     isl_space *AlignSpace = nullptr;
3000     std::vector<isl_pw_aff *> AlignedBounds;
3001     std::tie(AlignSpace, AlignedBounds) =
3002         alignPwAffs(std::move(Bounds), SeedAlignSpace);
3003 
3004     assert(AlignSpace && "alignPwAffs did not initialise AlignSpace");
3005 
3006     isl_pw_aff_list *BoundsList =
3007         createPwAffList(S->getIslCtx().get(), std::move(AlignedBounds));
3008 
3009     isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);
3010     BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace);
3011 
3012     assert(BoundsSpace && "Unable to access space of array.");
3013     assert(BoundsList && "Unable to access list of bounds.");
3014 
3015     PPCGArray.bound =
3016         isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
3017     assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
3018   }
3019 
3020   /// Create the arrays for @p PPCGProg.
3021   ///
3022   /// @param PPCGProg The program to compute the arrays for.
3023   void createArrays(gpu_prog *PPCGProg,
3024                     const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
3025     int i = 0;
3026     for (auto &Array : ValidSAIs) {
3027       std::string TypeName;
3028       raw_string_ostream OS(TypeName);
3029 
3030       OS << *Array->getElementType();
3031       TypeName = OS.str();
3032 
3033       gpu_array_info &PPCGArray = PPCGProg->array[i];
3034 
3035       PPCGArray.space = Array->getSpace().release();
3036       PPCGArray.type = strdup(TypeName.c_str());
3037       PPCGArray.size = DL->getTypeAllocSize(Array->getElementType());
3038       PPCGArray.name = strdup(Array->getName().c_str());
3039       PPCGArray.extent = nullptr;
3040       PPCGArray.n_index = Array->getNumberOfDimensions();
3041       PPCGArray.extent = getExtent(Array).release();
3042       PPCGArray.n_ref = 0;
3043       PPCGArray.refs = nullptr;
3044       PPCGArray.accessed = true;
3045       PPCGArray.read_only_scalar =
3046           Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
3047       PPCGArray.has_compound_element = false;
3048       PPCGArray.local = false;
3049       PPCGArray.declare_local = false;
3050       PPCGArray.global = false;
3051       PPCGArray.linearize = false;
3052       PPCGArray.dep_order = nullptr;
3053       PPCGArray.user = Array;
3054 
3055       PPCGArray.bound = nullptr;
3056       setArrayBounds(PPCGArray, Array);
3057       i++;
3058 
3059       collect_references(PPCGProg, &PPCGArray);
3060       PPCGArray.only_fixed_element = only_fixed_element_accessed(&PPCGArray);
3061     }
3062   }
3063 
3064   /// Create an identity map between the arrays in the scop.
3065   ///
3066   /// @returns An identity map between the arrays in the scop.
3067   isl_union_map *getArrayIdentity() {
3068     isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release());
3069 
3070     for (auto &Array : S->arrays()) {
3071       isl_space *Space = Array->getSpace().release();
3072       Space = isl_space_map_from_set(Space);
3073       isl_map *Identity = isl_map_identity(Space);
3074       Maps = isl_union_map_add_map(Maps, Identity);
3075     }
3076 
3077     return Maps;
3078   }
3079 
3080   /// Create a default-initialized PPCG GPU program.
3081   ///
3082   /// @returns A new gpu program description.
3083   gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {
3084 
3085     if (!PPCGScop)
3086       return nullptr;
3087 
3088     auto PPCGProg = isl_calloc_type(S->getIslCtx().get(), struct gpu_prog);
3089 
3090     PPCGProg->ctx = S->getIslCtx().get();
3091     PPCGProg->scop = PPCGScop;
3092     PPCGProg->context = isl_set_copy(PPCGScop->context);
3093     PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
3094     PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
3095     PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
3096     PPCGProg->tagged_must_kill =
3097         isl_union_map_copy(PPCGScop->tagged_must_kills);
3098     PPCGProg->to_inner = getArrayIdentity();
3099     PPCGProg->to_outer = getArrayIdentity();
3100     // TODO: verify that this assignment is correct.
3101     PPCGProg->any_to_outer = nullptr;
3102     PPCGProg->n_stmts = std::distance(S->begin(), S->end());
3103     PPCGProg->stmts = getStatements();
3104 
3105     // Only consider arrays that have a non-empty extent.
3106     // Otherwise, this will cause us to consider the following kinds of
3107     // empty arrays:
3108     //     1. Invariant loads that are represented by SAI objects.
3109     //     2. Arrays with statically known zero size.
3110     auto ValidSAIsRange =
3111         make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
3112           return !getExtent(SAI).is_empty();
3113         });
3114     SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
3115                                               ValidSAIsRange.end());
3116 
3117     PPCGProg->n_array =
3118         ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end());
3119     PPCGProg->array = isl_calloc_array(
3120         S->getIslCtx().get(), struct gpu_array_info, PPCGProg->n_array);
3121 
3122     createArrays(PPCGProg, ValidSAIs);
3123 
3124     PPCGProg->array_order = nullptr;
3125     collect_order_dependences(PPCGProg);
3126 
3127     PPCGProg->may_persist = compute_may_persist(PPCGProg);
3128     return PPCGProg;
3129   }
3130 
3131   struct PrintGPUUserData {
3132     struct cuda_info *CudaInfo;
3133     struct gpu_prog *PPCGProg;
3134     std::vector<ppcg_kernel *> Kernels;
3135   };
3136 
3137   /// Print a user statement node in the host code.
3138   ///
3139   /// We use ppcg's printing facilities to print the actual statement and
3140   /// additionally build up a list of all kernels that are encountered in the
3141   /// host ast.
3142   ///
3143   /// @param P The printer to print to
3144   /// @param Options The printing options to use
3145   /// @param Node The node to print
3146   /// @param User A user pointer to carry additional data. This pointer is
3147   ///             expected to be of type PrintGPUUserData.
3148   ///
3149   /// @returns A printer to which the output has been printed.
3150   static __isl_give isl_printer *
3151   printHostUser(__isl_take isl_printer *P,
3152                 __isl_take isl_ast_print_options *Options,
3153                 __isl_take isl_ast_node *Node, void *User) {
3154     auto Data = (struct PrintGPUUserData *)User;
3155     auto Id = isl_ast_node_get_annotation(Node);
3156 
3157     if (Id) {
3158       bool IsUser = !strcmp(isl_id_get_name(Id), "user");
3159 
3160       // If this is a user statement, format it ourselves as ppcg would
3161       // otherwise try to call pet functionality that is not available in
3162       // Polly.
3163       if (IsUser) {
3164         P = isl_printer_start_line(P);
3165         P = isl_printer_print_ast_node(P, Node);
3166         P = isl_printer_end_line(P);
3167         isl_id_free(Id);
3168         isl_ast_print_options_free(Options);
3169         return P;
3170       }
3171 
3172       auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
3173       isl_id_free(Id);
3174       Data->Kernels.push_back(Kernel);
3175     }
3176 
3177     return print_host_user(P, Options, Node, User);
3178   }
3179 
3180   /// Print C code corresponding to the control flow in @p Kernel.
3181   ///
3182   /// @param Kernel The kernel to print
3183   void printKernel(ppcg_kernel *Kernel) {
3184     auto *P = isl_printer_to_str(S->getIslCtx().get());
3185     P = isl_printer_set_output_format(P, ISL_FORMAT_C);
3186     auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
3187     P = isl_ast_node_print(Kernel->tree, P, Options);
3188     char *String = isl_printer_get_str(P);
3189     outs() << String << "\n";
3190     free(String);
3191     isl_printer_free(P);
3192   }
3193 
3194   /// Print C code corresponding to the GPU code described by @p Tree.
3195   ///
3196   /// @param Tree An AST describing GPU code
3197   /// @param PPCGProg The PPCG program from which @Tree has been constructed.
3198   void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
3199     auto *P = isl_printer_to_str(S->getIslCtx().get());
3200     P = isl_printer_set_output_format(P, ISL_FORMAT_C);
3201 
3202     PrintGPUUserData Data;
3203     Data.PPCGProg = PPCGProg;
3204 
3205     auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get());
3206     Options =
3207         isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
3208     P = isl_ast_node_print(Tree, P, Options);
3209     char *String = isl_printer_get_str(P);
3210     outs() << "# host\n";
3211     outs() << String << "\n";
3212     free(String);
3213     isl_printer_free(P);
3214 
3215     for (auto Kernel : Data.Kernels) {
3216       outs() << "# kernel" << Kernel->id << "\n";
3217       printKernel(Kernel);
3218     }
3219   }
3220 
3221   // Generate a GPU program using PPCG.
3222   //
3223   // GPU mapping consists of multiple steps:
3224   //
3225   //  1) Compute new schedule for the program.
3226   //  2) Map schedule to GPU (TODO)
3227   //  3) Generate code for new schedule (TODO)
3228   //
3229   // We do not use here the Polly ScheduleOptimizer, as the schedule optimizer
3230   // is mostly CPU specific. Instead, we use PPCG's GPU code generation
3231   // strategy directly from this pass.
3232   gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {
3233 
3234     auto PPCGGen = isl_calloc_type(S->getIslCtx().get(), struct gpu_gen);
3235 
3236     PPCGGen->ctx = S->getIslCtx().get();
3237     PPCGGen->options = PPCGScop->options;
3238     PPCGGen->print = nullptr;
3239     PPCGGen->print_user = nullptr;
3240     PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
3241     PPCGGen->prog = PPCGProg;
3242     PPCGGen->tree = nullptr;
3243     PPCGGen->types.n = 0;
3244     PPCGGen->types.name = nullptr;
3245     PPCGGen->sizes = nullptr;
3246     PPCGGen->used_sizes = nullptr;
3247     PPCGGen->kernel_id = 0;
3248 
3249     // Set scheduling strategy to same strategy PPCG is using.
3250     isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
3251     isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
3252     isl_options_set_schedule_whole_component(PPCGGen->ctx, false);
3253 
3254     isl_schedule *Schedule = get_schedule(PPCGGen);
3255 
3256     int has_permutable = has_any_permutable_node(Schedule);
3257 
3258     Schedule =
3259         isl_schedule_align_params(Schedule, S->getFullParamSpace().release());
3260 
3261     if (!has_permutable || has_permutable < 0) {
3262       Schedule = isl_schedule_free(Schedule);
3263       LLVM_DEBUG(dbgs() << getUniqueScopName(S)
3264                         << " does not have permutable bands. Bailing out\n";);
3265     } else {
3266       const bool CreateTransferToFromDevice = !PollyManagedMemory;
3267       Schedule = map_to_device(PPCGGen, Schedule, CreateTransferToFromDevice);
3268       PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
3269     }
3270 
3271     if (DumpSchedule) {
3272       isl_printer *P = isl_printer_to_str(S->getIslCtx().get());
3273       P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
3274       P = isl_printer_print_str(P, "Schedule\n");
3275       P = isl_printer_print_str(P, "========\n");
3276       if (Schedule)
3277         P = isl_printer_print_schedule(P, Schedule);
3278       else
3279         P = isl_printer_print_str(P, "No schedule found\n");
3280 
3281       outs() << isl_printer_get_str(P) << "\n";
3282       isl_printer_free(P);
3283     }
3284 
3285     if (DumpCode) {
3286       outs() << "Code\n";
3287       outs() << "====\n";
3288       if (PPCGGen->tree)
3289         printGPUTree(PPCGGen->tree, PPCGProg);
3290       else
3291         outs() << "No code generated\n";
3292     }
3293 
3294     isl_schedule_free(Schedule);
3295 
3296     return PPCGGen;
3297   }
3298 
3299   /// Free gpu_gen structure.
3300   ///
3301   /// @param PPCGGen The ppcg_gen object to free.
3302   void freePPCGGen(gpu_gen *PPCGGen) {
3303     isl_ast_node_free(PPCGGen->tree);
3304     isl_union_map_free(PPCGGen->sizes);
3305     isl_union_map_free(PPCGGen->used_sizes);
3306     free(PPCGGen);
3307   }
3308 
3309   /// Free the options in the ppcg scop structure.
3310   ///
3311   /// ppcg is not freeing these options for us. To avoid leaks we do this
3312   /// ourselves.
3313   ///
3314   /// @param PPCGScop The scop referencing the options to free.
3315   void freeOptions(ppcg_scop *PPCGScop) {
3316     free(PPCGScop->options->debug);
3317     PPCGScop->options->debug = nullptr;
3318     free(PPCGScop->options);
3319     PPCGScop->options = nullptr;
3320   }
3321 
3322   /// Approximate the number of points in the set.
3323   ///
3324   /// This function returns an ast expression that overapproximates the number
3325   /// of points in an isl set through the rectangular hull surrounding this set.
3326   ///
3327   /// @param Set   The set to count.
3328   /// @param Build The isl ast build object to use for creating the ast
3329   ///              expression.
3330   ///
3331   /// @returns An approximation of the number of points in the set.
3332   __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
3333                                              __isl_keep isl_ast_build *Build) {
3334 
3335     isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
3336     auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));
3337 
3338     isl_space *Space = isl_set_get_space(Set);
3339     Space = isl_space_params(Space);
3340     auto *Univ = isl_set_universe(Space);
3341     isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);
3342 
3343     for (long i = 0, n = isl_set_dim(Set, isl_dim_set); i < n; i++) {
3344       isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
3345       isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
3346       isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
3347       DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
3348       auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
3349       Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
3350     }
3351 
3352     isl_set_free(Set);
3353     isl_pw_aff_free(OneAff);
3354 
3355     return Expr;
3356   }
3357 
3358   /// Approximate a number of dynamic instructions executed by a given
3359   /// statement.
3360   ///
3361   /// @param Stmt  The statement for which to compute the number of dynamic
3362   ///              instructions.
3363   /// @param Build The isl ast build object to use for creating the ast
3364   ///              expression.
3365   /// @returns An approximation of the number of dynamic instructions executed
3366   ///          by @p Stmt.
3367   __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
3368                                              __isl_keep isl_ast_build *Build) {
3369     auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);
3370 
3371     long InstCount = 0;
3372 
3373     if (Stmt.isBlockStmt()) {
3374       auto *BB = Stmt.getBasicBlock();
3375       InstCount = std::distance(BB->begin(), BB->end());
3376     } else {
3377       auto *R = Stmt.getRegion();
3378 
3379       for (auto *BB : R->blocks()) {
3380         InstCount += std::distance(BB->begin(), BB->end());
3381       }
3382     }
3383 
3384     isl_val *InstVal = isl_val_int_from_si(S->getIslCtx().get(), InstCount);
3385     auto *InstExpr = isl_ast_expr_from_val(InstVal);
3386     return isl_ast_expr_mul(InstExpr, Iterations);
3387   }
3388 
3389   /// Approximate dynamic instructions executed in scop.
3390   ///
3391   /// @param S     The scop for which to approximate dynamic instructions.
3392   /// @param Build The isl ast build object to use for creating the ast
3393   ///              expression.
3394   /// @returns An approximation of the number of dynamic instructions executed
3395   ///          in @p S.
3396   __isl_give isl_ast_expr *
3397   getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
3398     isl_ast_expr *Instructions;
3399 
3400     isl_val *Zero = isl_val_int_from_si(S.getIslCtx().get(), 0);
3401     Instructions = isl_ast_expr_from_val(Zero);
3402 
3403     for (ScopStmt &Stmt : S) {
3404       isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
3405       Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
3406     }
3407     return Instructions;
3408   }
3409 
3410   /// Create a check that ensures sufficient compute in scop.
3411   ///
3412   /// @param S     The scop for which to ensure sufficient compute.
3413   /// @param Build The isl ast build object to use for creating the ast
3414   ///              expression.
3415   /// @returns An expression that evaluates to TRUE in case of sufficient
3416   ///          compute and to FALSE, otherwise.
3417   __isl_give isl_ast_expr *
3418   createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
3419     auto Iterations = getNumberOfIterations(S, Build);
3420     auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx().get(), MinCompute);
3421     auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
3422     return isl_ast_expr_ge(Iterations, MinComputeExpr);
3423   }
3424 
3425   /// Check if the basic block contains a function we cannot codegen for GPU
3426   /// kernels.
3427   ///
3428   /// If this basic block does something with a `Function` other than calling
3429   /// a function that we support in a kernel, return true.
3430   bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
3431                                             bool AllowCUDALibDevice) {
3432     for (const Instruction &Inst : *BB) {
3433       const CallInst *Call = dyn_cast<CallInst>(&Inst);
3434       if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
3435                                           AllowCUDALibDevice))
3436         continue;
3437 
3438       for (Value *Op : Inst.operands())
3439         // Look for (<func-type>*) among operands of Inst
3440         if (auto PtrTy = dyn_cast<PointerType>(Op->getType())) {
3441           if (isa<FunctionType>(PtrTy->getElementType())) {
3442             LLVM_DEBUG(dbgs()
3443                        << Inst << " has illegal use of function in kernel.\n");
3444             return true;
3445           }
3446         }
3447     }
3448     return false;
3449   }
3450 
3451   /// Return whether the Scop S uses functions in a way that we do not support.
3452   bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
3453     for (auto &Stmt : S) {
3454       if (Stmt.isBlockStmt()) {
3455         if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
3456                                                  AllowCUDALibDevice))
3457           return true;
3458       } else {
3459         assert(Stmt.isRegionStmt() &&
3460                "Stmt was neither block nor region statement");
3461         for (const BasicBlock *BB : Stmt.getRegion()->blocks())
3462           if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
3463             return true;
3464       }
3465     }
3466     return false;
3467   }
3468 
3469   /// Generate code for a given GPU AST described by @p Root.
3470   ///
3471   /// @param Root An isl_ast_node pointing to the root of the GPU AST.
3472   /// @param Prog The GPU Program to generate code for.
3473   void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
3474     ScopAnnotator Annotator;
3475     Annotator.buildAliasScopes(*S);
3476 
3477     Region *R = &S->getRegion();
3478 
3479     simplifyRegion(R, DT, LI, RI);
3480 
3481     BasicBlock *EnteringBB = R->getEnteringBlock();
3482 
3483     PollyIRBuilder Builder(EnteringBB->getContext(), ConstantFolder(),
3484                            IRInserter(Annotator));
3485     Builder.SetInsertPoint(EnteringBB->getTerminator());
3486 
3487     // Only build the run-time condition and parameters _after_ having
3488     // introduced the conditional branch. This is important as the conditional
3489     // branch will guard the original scop from new induction variables that
3490     // the SCEVExpander may introduce while code generating the parameters and
3491     // which may introduce scalar dependences that prevent us from correctly
3492     // code generating this scop.
3493     BBPair StartExitBlocks;
3494     BranchInst *CondBr = nullptr;
3495     std::tie(StartExitBlocks, CondBr) =
3496         executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
3497     BasicBlock *StartBlock = std::get<0>(StartExitBlocks);
3498 
3499     assert(CondBr && "CondBr not initialized by executeScopConditionally");
3500 
3501     GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
3502                                StartBlock, Prog, Runtime, Architecture);
3503 
3504     // TODO: Handle LICM
3505     auto SplitBlock = StartBlock->getSinglePredecessor();
3506     Builder.SetInsertPoint(SplitBlock->getTerminator());
3507 
3508     isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx().get());
3509     isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
3510     isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
3511     Condition = isl_ast_expr_and(Condition, SufficientCompute);
3512     isl_ast_build_free(Build);
3513 
3514     // preload invariant loads. Note: This should happen before the RTC
3515     // because the RTC may depend on values that are invariant load hoisted.
3516     if (!NodeBuilder.preloadInvariantLoads()) {
3517       // Patch the introduced branch condition to ensure that we always execute
3518       // the original SCoP.
3519       auto *FalseI1 = Builder.getFalse();
3520       auto *SplitBBTerm = Builder.GetInsertBlock()->getTerminator();
3521       SplitBBTerm->setOperand(0, FalseI1);
3522 
3523       LLVM_DEBUG(dbgs() << "preloading invariant loads failed in function: " +
3524                                S->getFunction().getName() +
3525                                " | Scop Region: " + S->getNameStr());
3526       // adjust the dominator tree accordingly.
3527       auto *ExitingBlock = StartBlock->getUniqueSuccessor();
3528       assert(ExitingBlock);
3529       auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
3530       assert(MergeBlock);
3531       polly::markBlockUnreachable(*StartBlock, Builder);
3532       polly::markBlockUnreachable(*ExitingBlock, Builder);
3533       auto *ExitingBB = S->getExitingBlock();
3534       assert(ExitingBB);
3535 
3536       DT->changeImmediateDominator(MergeBlock, ExitingBB);
3537       DT->eraseNode(ExitingBlock);
3538       isl_ast_expr_free(Condition);
3539       isl_ast_node_free(Root);
3540     } else {
3541 
3542       if (polly::PerfMonitoring) {
3543         PerfMonitor P(*S, EnteringBB->getParent()->getParent());
3544         P.initialize();
3545         P.insertRegionStart(SplitBlock->getTerminator());
3546 
3547         // TODO: actually think if this is the correct exiting block to place
3548         // the `end` performance marker. Invariant load hoisting changes
3549         // the CFG in a way that I do not precisely understand, so I
3550         // (Siddharth<[email protected]>) should come back to this and
3551         // think about which exiting block to use.
3552         auto *ExitingBlock = StartBlock->getUniqueSuccessor();
3553         assert(ExitingBlock);
3554         BasicBlock *MergeBlock = ExitingBlock->getUniqueSuccessor();
3555         P.insertRegionEnd(MergeBlock->getTerminator());
3556       }
3557 
3558       NodeBuilder.addParameters(S->getContext().release());
3559       Value *RTC = NodeBuilder.createRTC(Condition);
3560       Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);
3561 
3562       Builder.SetInsertPoint(&*StartBlock->begin());
3563 
3564       NodeBuilder.create(Root);
3565     }
3566 
3567     /// In case a sequential kernel has more surrounding loops as any parallel
3568     /// kernel, the SCoP is probably mostly sequential. Hence, there is no
3569     /// point in running it on a GPU.
3570     if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
3571       CondBr->setOperand(0, Builder.getFalse());
3572 
3573     if (!NodeBuilder.BuildSuccessful)
3574       CondBr->setOperand(0, Builder.getFalse());
3575   }
3576 
3577   bool runOnScop(Scop &CurrentScop) override {
3578     S = &CurrentScop;
3579     LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
3580     DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3581     SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3582     DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
3583     RI = &getAnalysis<RegionInfoPass>().getRegionInfo();
3584 
3585     LLVM_DEBUG(dbgs() << "PPCGCodeGen running on : " << getUniqueScopName(S)
3586                       << " | loop depth: " << S->getMaxLoopDepth() << "\n");
3587 
3588     // We currently do not support functions other than intrinsics inside
3589     // kernels, as code generation will need to offload function calls to the
3590     // kernel. This may lead to a kernel trying to call a function on the host.
3591     // This also allows us to prevent codegen from trying to take the
3592     // address of an intrinsic function to send to the kernel.
3593     if (containsInvalidKernelFunction(CurrentScop,
3594                                       Architecture == GPUArch::NVPTX64)) {
3595       LLVM_DEBUG(
3596           dbgs() << getUniqueScopName(S)
3597                  << " contains function which cannot be materialised in a GPU "
3598                     "kernel. Bailing out.\n";);
3599       return false;
3600     }
3601 
3602     auto PPCGScop = createPPCGScop();
3603     auto PPCGProg = createPPCGProg(PPCGScop);
3604     auto PPCGGen = generateGPU(PPCGScop, PPCGProg);
3605 
3606     if (PPCGGen->tree) {
3607       generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
3608       CurrentScop.markAsToBeSkipped();
3609     } else {
3610       LLVM_DEBUG(dbgs() << getUniqueScopName(S)
3611                         << " has empty PPCGGen->tree. Bailing out.\n");
3612     }
3613 
3614     freeOptions(PPCGScop);
3615     freePPCGGen(PPCGGen);
3616     gpu_prog_free(PPCGProg);
3617     ppcg_scop_free(PPCGScop);
3618 
3619     return true;
3620   }
3621 
3622   void printScop(raw_ostream &, Scop &) const override {}
3623 
3624   void getAnalysisUsage(AnalysisUsage &AU) const override {
3625     ScopPass::getAnalysisUsage(AU);
3626 
3627     AU.addRequired<DominatorTreeWrapperPass>();
3628     AU.addRequired<RegionInfoPass>();
3629     AU.addRequired<ScalarEvolutionWrapperPass>();
3630     AU.addRequired<ScopDetectionWrapperPass>();
3631     AU.addRequired<ScopInfoRegionPass>();
3632     AU.addRequired<LoopInfoWrapperPass>();
3633 
3634     // FIXME: We do not yet add regions for the newly generated code to the
3635     //        region tree.
3636   }
3637 };
3638 } // namespace
3639 
3640 char PPCGCodeGeneration::ID = 1;
3641 
3642 Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
3643   PPCGCodeGeneration *generator = new PPCGCodeGeneration();
3644   generator->Runtime = Runtime;
3645   generator->Architecture = Arch;
3646   return generator;
3647 }
3648 
3649 INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
3650                       "Polly - Apply PPCG translation to SCOP", false, false)
3651 INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
3652 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
3653 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
3654 INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
3655 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
3656 INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
3657 INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
3658                     "Polly - Apply PPCG translation to SCOP", false, false)
3659