//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#include "isl/union_map.h"

extern "C" {
#include "ppcg/cuda.h"
#include "ppcg/gpu.h"
#include "ppcg/gpu_print.h"
#include "ppcg/ppcg.h"
#include "ppcg/schedule.h"
}

#include "llvm/Support/Debug.h"

using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> ManagedMemory("polly-acc-codegen-managed-memory",
                                   cl::desc("Generate Host kernel code assuming"
                                            " that all memory has been"
                                            " declared as managed memory"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));
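
// A usage sketch (illustrative, assuming the pass is registered under the
// name "polly-codegen-ppcg"): these flags are passed on the command line
// together with the pass itself, e.g.
//
//   opt -polly-codegen-ppcg -polly-acc-dump-code -polly-acc-dump-kernel-ir ...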
/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
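///
/// Example (illustrative): for a statement that performs the access A[i + 1],
/// the returned map associates the isl_id of that memory access with an AST
/// expression for the accessed element A[i + 1], after the expression has
/// been rewritten through @p FunctionIndex and @p FunctionExpr.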
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, isl_ast_build *Build,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  isl_ctx *Ctx;

  if (!Stmt || !Build)
    return NULL;

  Ctx = isl_ast_build_get_ctx(Build);
  isl_id_to_ast_expr *RefToExpr = isl_id_to_ast_expr_alloc(Ctx, 0);

  for (MemoryAccess *Acc : *Stmt) {
    isl_map *AddrFunc = Acc->getAddressFunction();
    AddrFunc = isl_map_intersect_domain(AddrFunc, Stmt->getDomain());
    isl_id *RefId = Acc->getId();
    isl_pw_multi_aff *PMA = isl_pw_multi_aff_from_map(AddrFunc);
    isl_multi_pw_aff *MPA = isl_multi_pw_aff_from_pw_multi_aff(PMA);
    MPA = isl_multi_pw_aff_coalesce(MPA);
    MPA = FunctionIndex(MPA, RefId, UserIndex);
    isl_ast_expr *Access = isl_ast_build_access_from_multi_pw_aff(Build, MPA);
    Access = FunctionExpr(Access, RefId, UserExpr);
    RefToExpr = isl_id_to_ast_expr_set(RefToExpr, RefId, Access);
  }

  return RefToExpr;
}
/// Given an LLVM Type, compute its size in bytes.
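///
/// Example (illustrative): a 'double' has a primitive size of 64 bits, so
/// this returns 8. The scalar-size fallback covers types whose primitive
/// size is reported as zero.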
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}

/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  virtual void finalize();

  /// Track if the full build process was successful.
  ///
  /// This value is set to false, if throughout the build process an error
  /// occurred which prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed any
  /// more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel.
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  class IslIdDeleter {
  public:
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;

  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  virtual void createUser(__isl_take isl_ast_node *UserStmt);

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
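  /// Conceptually this emits a call to either
  /// polly_copyFromHostToDevice(HostPtr, DevPtr, Size) or
  /// polly_copyFromDeviceToHost(DevPtr, HostPtr, Size), depending on
  /// @p Direction.
  ///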
  /// @param TransferStmt The data transfer statement.
  /// @param Direction The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);

  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values.
  ///
  /// @returns A pair, whose first element contains the set of values
  ///          referenced by the kernel, and whose second element contains the
  ///          set of functions referenced by the kernel. All functions in the
  ///          second set satisfy isValidFunctionInKernel.
  std::pair<SetVector<Value *>, SetVector<Function *>>
  getReferencesInKernel(ppcg_kernel *Kernel);

  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for the X and Y dimensions.
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Creates an array that can be sent to the kernel on the device using a
  /// host pointer. This is required for managed memory, when we directly send
  /// host pointers to the device.
  /// \note
  /// This is to be used only with managed memory.
  Value *getOrCreateManagedDeviceArray(gpu_array_info *Array,
                                       ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);

  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
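  /// Conceptually this generates: Parameters[Index] = (i8 *)Param.
  ///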
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
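  /// The parameter array has 2 * #arguments slots (illustrative layout): the
  /// first half holds pointers to the argument values, the second half holds
  /// pointers to i32 values giving each argument's size in bytes.
  ///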
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variables.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel        The kernel definition to create variables for.
  /// @param FN            The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);

  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
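  ///
  /// The emitted metadata conceptually takes the form (illustrative):
  ///
  ///   !nvvm.annotations = !{!0}
  ///   !0 = !{void (...)* @kernel, !"maxntidx", i32 X,
  ///          !"maxntidy", i32 Y, !"maxntidz", i32 Z}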
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);

  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
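  /// Example (illustrative): for an array of doubles with extent [N][M],
  /// the computed size is 8 * N * M bytes.
  ///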
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);

  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);

  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  /// SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  /// old functions (that come from the original module) to new functions
  /// (that are created within GPUModule). That way, we generate references
  /// to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);

  /// Create a global-to-shared or shared-to-global copy statement.
  ///
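  /// Depending on the direction, the generated code conceptually performs
  /// either shared[local_index] = global[index] (read) or
  /// global[index] = shared[local_index] (write).
  ///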
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
  /// dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();

  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
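  ///
  /// For each array this conceptually emits
  /// p_dev_array_<name> = polly_allocateMemoryForDevice(<size in bytes>).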
  void allocateDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation.
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate.
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  /// @param Size The number of bytes to copy.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr A host pointer specifying the location to copy to.
  /// @param Size The number of bytes to copy.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  /// \note
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object.
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);

  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param BlockDimX  The size of the first block dimension.
  /// @param BlockDimY  The size of the second block dimension.
  /// @param BlockDimZ  The size of the third block dimension.
  /// @param Parameters A pointer to an array of pointers to the parameter
  ///                   values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};

void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!ManagedMemory)
    allocateDeviceArrays();
}

void GPUNodeBuilder::finalize() {
  if (!ManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}

void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!ManagedMemory && "Managed memory will directly send host pointers "
                           "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}

void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (auto &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };
    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}

void GPUNodeBuilder::freeDeviceArrays() {
  assert(!ManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}

Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}

Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}

void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}

void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}

void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!ManagedMemory && "Managed memory does not allocate or free memory "
                           "for device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}

Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!ManagedMemory && "Managed memory does not allocate or free memory "
                           "for device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}

void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!ManagedMemory && "Managed memory does not transfer memory between "
                           "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}

void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!ManagedMemory && "Managed memory does not transfer memory between "
                           "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}

void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(ManagedMemory && "explicit synchronization is only necessary for "
                          "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}

Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}

void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}

/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
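///
/// For example, isPrefix("to_device_MemRef_A", "to_device") is true, while
/// isPrefix("from_device_MemRef_A", "to_device") is false.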
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}

Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext());
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    auto OffsetDimZero = isl_pw_aff_copy(Array->bound[0]);
    isl_ast_expr *Res = isl_ast_build_expr_from_pw_aff(Build, OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl_pw_aff *Bound_I = isl_pw_aff_copy(Array->bound[i]);
      isl_ast_expr *Expr = isl_ast_build_expr_from_pw_aff(Build, Bound_I);
      Res = isl_ast_expr_mul(Res, Expr);
    }

    Value *NumElements = ExprBuilder.create(Res);
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }
  isl_ast_build_free(Build);
  return ArraySize;
}

Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
  if (gpu_array_is_scalar(Array))
    return nullptr;

  isl_ast_build *Build = isl_ast_build_from_context(S.getContext());

  isl_set *Min = isl_set_lexmin(isl_set_copy(Array->extent));

  isl_set *ZeroSet = isl_set_universe(isl_set_get_space(Min));

  for (long i = 0; i < isl_set_dim(Min, isl_dim_set); i++)
    ZeroSet = isl_set_fix_si(ZeroSet, isl_dim_set, i, 0);

  if (isl_set_is_subset(Min, ZeroSet)) {
    isl_set_free(Min);
    isl_set_free(ZeroSet);
    isl_ast_build_free(Build);
    return nullptr;
  }
  isl_set_free(ZeroSet);

  isl_ast_expr *Result =
      isl_ast_expr_from_val(isl_val_int_from_si(isl_set_get_ctx(Min), 0));

  for (long i = 0; i < isl_set_dim(Min, isl_dim_set); i++) {
    if (i > 0) {
      isl_pw_aff *Bound_I = isl_pw_aff_copy(Array->bound[i - 1]);
      isl_ast_expr *BExpr = isl_ast_build_expr_from_pw_aff(Build, Bound_I);
      Result = isl_ast_expr_mul(Result, BExpr);
    }
    isl_pw_aff *DimMin = isl_set_dim_min(isl_set_copy(Min), i);
    isl_ast_expr *MExpr = isl_ast_build_expr_from_pw_aff(Build, DimMin);
    Result = isl_ast_expr_add(Result, MExpr);
  }

  Value *ResultValue = ExprBuilder.create(Result);
  isl_set_free(Min);
  isl_ast_build_free(Build);

  return ResultValue;
}

Value *GPUNodeBuilder::getOrCreateManagedDeviceArray(gpu_array_info *Array,
                                                     ScopArrayInfo *ArrayInfo) {

  assert(ManagedMemory && "Only used when you wish to get a host "
                          "pointer for sending data to the kernel, "
                          "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  if ((it = DeviceAllocations.find(ArrayInfo)) != DeviceAllocations.end()) {
    return it->second;
  } else {
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ArrayInfo);
    else
      HostPtr = ArrayInfo->getBasePtr();

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ArrayInfo->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ArrayInfo] = HostPtr;
    return HostPtr;
  }
}

void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!ManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}

void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  isl_ast_expr_free(StmtExpr);

  // Read the name before dropping our reference to the id. isl id names are
  // interned in the isl_ctx, so the string stays valid after the free.
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "to_device")) {
    if (!ManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!ManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      createCallSynchronizeDevice();
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
  return;
}

void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}

void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
                                    ppcg_kernel_stmt *KernelStmt) {
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;

  LoopToScevMapT LTS;
  LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());

  createSubstitutions(Expr, Stmt, LTS);

  if (Stmt->isBlockStmt())
    BlockGen.copyStmt(*Stmt, LTS, Indexes);
  else
    RegionGen.copyStmt(*Stmt, LTS, Indexes);
}

void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  Function *Sync;

  switch (Arch) {
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}

/// Collect llvm::Values referenced from @p Node.
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}

/// Check if F is a function that we can code-generate in a GPU kernel.
static bool isValidFunctionInKernel(llvm::Function *F) {
  assert(F && "F is an invalid pointer");
  // We string compare against the name of the function to allow
  // all variants of the intrinsic "llvm.sqrt.*"
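  // (e.g. "llvm.sqrt.f32", "llvm.sqrt.f64", or vector variants such as
  // "llvm.sqrt.v4f32").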
  return F->isIntrinsic() && F->getName().startswith("llvm.sqrt");
}

/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking an address of any function and
/// trying to pass along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }

/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F) && "Code should have bailed out by "
                                           "this point if an invalid function "
                                           "were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}

std::pair<SetVector<Value *>, SetVector<Function *>>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator()};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs)
    findValues(Expr, SE, SubtreeValues);

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace();
  for (long i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  isl_space_free(Space);

  for (long i = 0; i < isl_space_dim(Kernel->space, isl_dim_set); i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // ValidSubtree{Values, Functions} sets. Nor should the sets
  // ValidSubtree{Values, Functions} have any common element.
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());
  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues));

  return std::make_pair(ValidSubtreeValues, ValidSubtreeFunctions);
}

void GPUNodeBuilder::clearDominators(Function *F) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (BasicBlock *BB : Nodes)
    DT.eraseNode(BB);
}

void GPUNodeBuilder::clearScalarEvolution(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
  }
}

void GPUNodeBuilder::clearLoops(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
    LI.removeBlock(&BB);
  }
}

std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;
  isl_ast_build *Context = isl_ast_build_from_context(S.getContext());

  for (long i = 0; i < Kernel->n_grid; i++) {
    isl_pw_aff *Size = isl_multi_pw_aff_get_pw_aff(Kernel->grid_size, i);
    isl_ast_expr *GridSize = isl_ast_build_expr_from_pw_aff(Context, Size);
    Value *Res = ExprBuilder.create(GridSize);
    Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    Sizes.push_back(Res);
  }
  isl_ast_build_free(Context);

  for (long i = Kernel->n_grid; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1]);
}

std::tuple<Value *, Value *, Value *>
GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;

  for (long i = 0; i < Kernel->n_block; i++) {
    Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_block; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
}

void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
                                          Instruction *Param, int Index) {
  Value *Slot = Builder.CreateGEP(
      Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
  Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
  Builder.CreateStore(ParamTyped, Slot);
}

Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  Type *ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);

  BasicBlock *EntryBlock =
      &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
  std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
  Instruction *Parameters = new AllocaInst(
      ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());

  int Index = 0;
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(Id);

    ArgSizes[Index] = SAI->getElemSizeInBytes();

    Value *DevArray = nullptr;
    if (ManagedMemory) {
      DevArray = getOrCreateManagedDeviceArray(
          &Prog->array[i], const_cast<ScopArrayInfo *>(SAI));
    } else {
      DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
      DevArray = createCallGetDevicePtr(DevArray);
    }
    assert(DevArray != nullptr && "Array to be offloaded to device not "
                                  "initialized");
    Value *Offset = getArrayOffset(&Prog->array[i]);

    if (Offset) {
      DevArray = Builder.CreatePointerCast(
          DevArray, SAI->getElementType()->getPointerTo());
      DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
      DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
    }
    Value *Slot = Builder.CreateGEP(
        Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Value *ValPtr = nullptr;
      if (ManagedMemory)
        ValPtr = DevArray;
      else
        ValPtr = BlockGen.getOrCreateAlloca(SAI);

      assert(ValPtr != nullptr && "ValPtr should point to a valid object "
                                  "to be stored into Parameters");
      Value *ValPtrCast =
          Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
      Builder.CreateStore(ValPtrCast, Slot);
    } else {
      Instruction *Param =
          new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
                         Launch + "_param_" + std::to_string(Index),
                         EntryBlock->getTerminator());
      Builder.CreateStore(DevArray, Param);
      Value *ParamTyped =
          Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
      Builder.CreateStore(ParamTyped, Slot);
    }
    Index++;
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (auto Val : SubtreeValues) {
    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (int i = 0; i < NumArgs; i++) {
    Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
    Instruction *Param =
        new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                       Launch + "_param_size_" + std::to_string(i),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  auto Location = EntryBlock->getTerminator();
  return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
                         Launch + "_params_i8ptr", Location);
}

void GPUNodeBuilder::setupKernelSubtreeFunctions(
    SetVector<Function *> SubtreeFunctions) {
  for (auto Fn : SubtreeFunctions) {
    const std::string ClonedFnName = Fn->getName();
    Function *Clone = GPUModule->getFunction(ClonedFnName);
    if (!Clone)
      Clone =
          Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
                           ClonedFnName, GPUModule.get());
    assert(Clone && "Expected cloned function to be initialized.");
    assert(ValueMap.find(Fn) == ValueMap.end() &&
           "Fn already present in ValueMap");
    ValueMap[Fn] = Clone;
  }
}

void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
  isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
  ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
  isl_id_free(Id);
  isl_ast_node_free(KernelStmt);

  if (Kernel->n_grid > 1)
    DeepestParallel =
        std::max(DeepestParallel, isl_space_dim(Kernel->space, isl_dim_set));
  else
    DeepestSequential =
        std::max(DeepestSequential, isl_space_dim(Kernel->space, isl_dim_set));

  Value *BlockDimX, *BlockDimY, *BlockDimZ;
  std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);

  SetVector<Value *> SubtreeValues;
  SetVector<Function *> SubtreeFunctions;
  std::tie(SubtreeValues, SubtreeFunctions) = getReferencesInKernel(Kernel);

  assert(Kernel->tree && "Device AST of kernel node is empty");

  Instruction &HostInsertPoint = *Builder.GetInsertPoint();
  IslExprBuilder::IDToValueTy HostIDs = IDToValue;
  ValueMapT HostValueMap = ValueMap;
  BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
  ScalarMap.clear();

  SetVector<const Loop *> Loops;

1472   // Create for all loops we depend on values that contain the current loop
1473   // iteration. These values are necessary to generate code for SCEVs that
1474   // depend on such loops. As a result we need to pass them to the subfunction.
1475   for (const Loop *L : Loops) {
1476     const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
1477                                             SE.getUnknown(Builder.getInt64(1)),
1478                                             L, SCEV::FlagAnyWrap);
1479     Value *V = generateSCEV(OuterLIV);
1480     OutsideLoopIterations[L] = SE.getUnknown(V);
1481     SubtreeValues.insert(V);
1482   }
1483 
1484   createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
1485   setupKernelSubtreeFunctions(SubtreeFunctions);
1486 
1487   create(isl_ast_node_copy(Kernel->tree));
1488 
1489   finalizeKernelArguments(Kernel);
1490   Function *F = Builder.GetInsertBlock()->getParent();
1491   addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
1492   clearDominators(F);
1493   clearScalarEvolution(F);
1494   clearLoops(F);
1495 
1496   IDToValue = HostIDs;
1497 
1498   ValueMap = std::move(HostValueMap);
1499   ScalarMap = std::move(HostScalarMap);
1500   EscapeMap.clear();
1501   IDToSAI.clear();
1502   Annotator.resetAlternativeAliasBases();
1503   for (auto &BasePtr : LocalArrays)
1504     S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
1505   LocalArrays.clear();
1506 
1507   std::string ASMString = finalizeKernelFunction();
1508   Builder.SetInsertPoint(&HostInsertPoint);
1509   Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);
1510 
1511   std::string Name = "kernel_" + std::to_string(Kernel->id);
1512   Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
1513   Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
1514   Value *GPUKernel = createCallGetKernel(KernelString, NameString);
1515 
1516   Value *GridDimX, *GridDimY;
1517   std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);
1518 
1519   createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
1520                          BlockDimZ, Parameters);
1521   createCallFreeKernel(GPUKernel);
1522 
1523   for (auto Id : KernelIds)
1524     isl_id_free(Id);
1525 
1526   KernelIds.clear();
1527 }
1528 
1529 /// Compute the DataLayout string for the NVPTX backend.
1530 ///
1531 /// @param is64Bit Are we looking for a 64 bit architecture?
1532 static std::string computeNVPTXDataLayout(bool is64Bit) {
1533   std::string Ret = "";
1534 
1535   if (!is64Bit) {
1536     Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1537            "64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1538            "64-v128:128:128-n16:32:64";
1539   } else {
1540     Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
1541            "64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
1542            "64-v128:128:128-n16:32:64";
1543   }
1544 
1545   return Ret;
1546 }
1547 
1548 Function *
1549 GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
1550                                          SetVector<Value *> &SubtreeValues) {
1551   std::vector<Type *> Args;
1552   std::string Identifier = "kernel_" + std::to_string(Kernel->id);
1553 
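  // The argument list is laid out in the same order in which
  // createLaunchParameters fills the parameter array on the host: first the
  // required array arguments, then the host loop iterators, then the isl
  // parameters, and finally the referenced host values.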
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
      const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(Id);
      Args.push_back(SAI->getElementType());
    } else {
      static const int UseGlobalMemory = 1;
      Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
    }
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++)
    Args.push_back(Builder.getInt64Ty());

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);
    Args.push_back(Val->getType());
  }

  for (auto *V : SubtreeValues)
    Args.push_back(V->getType());

  auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
  auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
                              GPUModule.get());

  switch (Arch) {
  case GPUArch::NVPTX64:
    FN->setCallingConv(CallingConv::PTX_Kernel);
    break;
  }

  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Arg->setName(Kernel->array[i].array->name);

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl_id_copy(Id));
    Type *EleTy = SAI->getElementType();
    Value *Val = &*Arg;
    SmallVector<const SCEV *, 4> Sizes;
    isl_ast_build *Build =
        isl_ast_build_from_context(isl_set_copy(Prog->context));
    Sizes.push_back(nullptr);
    for (long j = 1; j < Kernel->array[i].array->n_index; j++) {
      isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
          Build, isl_pw_aff_copy(Kernel->array[i].array->bound[j]));
      auto V = ExprBuilder.create(DimSize);
      Sizes.push_back(SE.getSCEV(V));
    }
    const ScopArrayInfo *SAIRep =
        S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
    LocalArrays.push_back(Val);

    isl_ast_build_free(Build);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAIRep;
    Arg++;
  }

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Arg->setName(isl_id_get_name(Id));
    Value *Val = IDToValue[Id];
    ValueMap[Val] = &*Arg;
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (auto *V : SubtreeValues) {
    Arg->setName(V->getName());
    ValueMap[V] = &*Arg;
    Arg++;
  }

  return FN;
}

void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[2];
  Intrinsic::ID IntrinsicsTID[3];

  switch (Arch) {
  case GPUArch::NVPTX64:
    IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
    IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;

    IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
    IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
    IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
    break;
  }

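  // Bind each block id to the corresponding ctaid register and each thread
  // id to the corresponding tid register. The 32-bit intrinsic results are
  // widened to i64, the type used for isl-generated induction variables.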
  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}

void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl_id_copy(Id));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

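    // The argument is a scalar. Read-only scalars arrive by value; writable
    // scalars arrive as an i8* that we cast and load through. Either way,
    // the value is stored into the scalar's alloca so that kernel code
    // generation can access it like any other host scalar.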
    Value *Val = &*Arg;

    if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Type *TypePtr = SAI->getElementType()->getPointerTo();
      Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
      Val = Builder.CreateLoad(TypedArgPtr);
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Builder.CreateStore(Val, Alloca);

    Arg++;
  }
}

void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
  auto *FN = Builder.GetInsertBlock()->getParent();
  auto Arg = FN->arg_begin();

  bool StoredScalar = false;
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl_id_copy(Id));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Arg++;
      continue;
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Value *ArgPtr = &*Arg;
    Type *TypePtr = SAI->getElementType()->getPointerTo();
    Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
    Value *Val = Builder.CreateLoad(Alloca);
    Builder.CreateStore(Val, TypedArgPtr);
    StoredScalar = true;

    Arg++;
  }

  if (StoredScalar)
    // In case more than one thread contains scalar stores, the generated
    // code might be incorrect if we only store at the end of the kernel.
    // To support this case we would need to store these scalars back at
    // each memory store, or at least before each kernel barrier.
    if (Kernel->n_block != 0 || Kernel->n_grid != 0)
      BuildSuccessful = false;
}

void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  for (int i = 0; i < Kernel->n_var; ++i) {
    struct ppcg_kernel_var &Var = Kernel->var[i];
    isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
    Type *EleTy = ScopArrayInfo::getFromId(Id)->getElementType();

    Type *ArrayTy = EleTy;
    SmallVector<const SCEV *, 4> Sizes;

    Sizes.push_back(nullptr);
    for (unsigned int j = 1; j < Var.array->n_index; ++j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
    }

    for (int j = Var.array->n_index - 1; j >= 0; --j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      ArrayTy = ArrayType::get(ArrayTy, Bound);
    }

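    // Shared-memory variables are materialized as zero-initialized globals
    // in the NVPTX shared address space (3); private variables live in a
    // per-thread alloca.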
    const ScopArrayInfo *SAI;
    Value *Allocation;
    if (Var.type == ppcg_access_shared) {
      auto GlobalVar = new GlobalVariable(
          *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
          nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
      GlobalVar->setAlignment(EleTy->getPrimitiveSizeInBits() / 8);
      GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));

      Allocation = GlobalVar;
    } else if (Var.type == ppcg_access_private) {
      Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
    } else {
      llvm_unreachable("unknown variable type");
    }
    SAI =
        S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
    Id = isl_id_alloc(S.getIslCtx(), Var.name, nullptr);
    IDToValue[Id] = Allocation;
    LocalArrays.push_back(Allocation);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAI;
  }
}

void GPUNodeBuilder::createKernelFunction(
    ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
    SetVector<Function *> &SubtreeFunctions) {
  std::string Identifier = "kernel_" + std::to_string(Kernel->id);
  GPUModule.reset(new Module(Identifier, Builder.getContext()));

  switch (Arch) {
  case GPUArch::NVPTX64:
    if (Runtime == GPURuntime::CUDA)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    else if (Runtime == GPURuntime::OpenCL)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
    GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
    break;
  }

  Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);

  BasicBlock *PrevBlock = Builder.GetInsertBlock();
  auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);

  DT.addNewBlock(EntryBlock, PrevBlock);

  Builder.SetInsertPoint(EntryBlock);
  Builder.CreateRetVoid();
  Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());

  ScopDetection::markFunctionAsInvalid(FN);

  prepareKernelArguments(Kernel, FN);
  createKernelVariables(Kernel, FN);
  insertKernelIntrinsics(Kernel);
}

std::string GPUNodeBuilder::createKernelASM() {
  llvm::Triple GPUTriple;

  switch (Arch) {
  case GPUArch::NVPTX64:
    switch (Runtime) {
    case GPURuntime::CUDA:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
      break;
    case GPURuntime::OpenCL:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
      break;
    }
    break;
  }

  std::string ErrMsg;
  auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);

  if (!GPUTarget) {
    errs() << ErrMsg << "\n";
    return "";
  }

  TargetOptions Options;
  Options.UnsafeFPMath = FastMath;

  std::string Subtarget;

  switch (Arch) {
  case GPUArch::NVPTX64:
    Subtarget = CudaVersion;
    break;
  }

  std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
      GPUTriple.getTriple(), Subtarget, "", Options, Optional<Reloc::Model>()));

  SmallString<0> ASMString;
  raw_svector_ostream ASMStream(ASMString);
  llvm::legacy::PassManager PM;

  PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));

  if (TargetM->addPassesToEmitFile(
          PM, ASMStream, TargetMachine::CGFT_AssemblyFile, true /* verify */)) {
    errs() << "The target does not support generation of this file type!\n";
    return "";
  }

  PM.run(*GPUModule);

  return ASMStream.str();
}

std::string GPUNodeBuilder::finalizeKernelFunction() {
  if (verifyModule(*GPUModule)) {
    BuildSuccessful = false;
    return "";
  }

  if (DumpKernelIR)
    outs() << *GPUModule << "\n";

  // Optimize module.
  llvm::legacy::PassManager OptPasses;
  PassManagerBuilder PassBuilder;
  PassBuilder.OptLevel = 3;
  PassBuilder.SizeLevel = 0;
  PassBuilder.populateModulePassManager(OptPasses);
  OptPasses.run(*GPUModule);

  std::string Assembly = createKernelASM();

  if (DumpKernelASM)
    outs() << Assembly << "\n";

  GPUModule.release();
  KernelIDs.clear();

  return Assembly;
}

namespace {
class PPCGCodeGeneration : public ScopPass {
public:
  static char ID;

  GPURuntime Runtime = GPURuntime::CUDA;

  GPUArch Architecture = GPUArch::NVPTX64;

  /// The scop that is currently processed.
  Scop *S;

  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const DataLayout *DL;
  RegionInfo *RI;

  PPCGCodeGeneration() : ScopPass(ID) {}

  /// Construct compilation options for PPCG.
  ///
  /// @returns The compilation options.
  ppcg_options *createPPCGOptions() {
    auto DebugOptions =
        (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
    auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));

    DebugOptions->dump_schedule_constraints = false;
    DebugOptions->dump_schedule = false;
    DebugOptions->dump_final_schedule = false;
    DebugOptions->dump_sizes = false;
    DebugOptions->verbose = false;

    Options->debug = DebugOptions;

    Options->reschedule = true;
    Options->scale_tile_loops = false;
    Options->wrap = false;

    Options->non_negative_parameters = false;
    Options->ctx = nullptr;
    Options->sizes = nullptr;

    Options->tile_size = 32;

    Options->use_private_memory = PrivateMemory;
    Options->use_shared_memory = SharedMemory;
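    // 48 KB corresponds to the per-block shared-memory capacity of common
    // NVIDIA GPUs of this generation.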
    Options->max_shared_memory = 48 * 1024;

    Options->target = PPCG_TARGET_CUDA;
    Options->openmp = false;
    Options->linearize_device_arrays = true;
    Options->live_range_reordering = false;

    Options->opencl_compiler_options = nullptr;
    Options->opencl_use_gpu = false;
    Options->opencl_n_include_file = 0;
    Options->opencl_include_files = nullptr;
    Options->opencl_print_kernel_types = false;
    Options->opencl_embed_kernel_code = false;

    Options->save_schedule_file = nullptr;
    Options->load_schedule_file = nullptr;

    return Options;
  }

  /// Get a tagged access relation containing all accesses of type @p AccessTy.
  ///
  /// Instead of a normal access of the form:
  ///
  ///   Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// a tagged access has the form
  ///
  ///   [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// where 'id' is an additional space that references the memory access that
  /// triggered the access.
  ///
  /// @param AccessTy The type of the memory accesses to collect.
  ///
  /// @return The relation describing all tagged memory accesses.
  isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
    isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace());

    for (auto &Stmt : *S)
      for (auto &Acc : Stmt)
        if (Acc->getType() == AccessTy) {
          isl_map *Relation = Acc->getAccessRelation();
          Relation = isl_map_intersect_domain(Relation, Stmt.getDomain());

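          // Build the universe map { id[] -> Array[...] } over the access
          // relation's range and take the domain product to obtain the
          // tagged form [Stmt[...] -> id[]] -> Array[...].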
          isl_space *Space = isl_map_get_space(Relation);
          Space = isl_space_range(Space);
          Space = isl_space_from_range(Space);
          Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId());
          isl_map *Universe = isl_map_universe(Space);
          Relation = isl_map_domain_product(Relation, Universe);
          Accesses = isl_union_map_add_map(Accesses, Relation);
        }

    return Accesses;
  }

  /// Get the set of all read accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedReads() {
    return getTaggedAccesses(MemoryAccess::READ);
  }

  /// Get the set of all may (and must) writes, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMayWrites() {
    return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
                               getTaggedAccesses(MemoryAccess::MUST_WRITE));
  }

  /// Get the set of all must writes, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMustWrites() {
    return getTaggedAccesses(MemoryAccess::MUST_WRITE);
  }

  /// Collect parameter and array names as isl_ids.
  ///
  /// To reason about the different parameters and arrays used, ppcg requires
  /// a list of all isl_ids in use. As PPCG traditionally performs
  /// source-to-source compilation, each of these isl_ids is mapped to the
  /// expression that represents it. As we do not have a corresponding
  /// expression in Polly, we just map each id to a 'zero' expression to match
  /// the data format that ppcg expects.
  ///
  /// @returns A map from the collected ids to 'zero' ast expressions.
  __isl_give isl_id_to_ast_expr *getNames() {
    auto *Names = isl_id_to_ast_expr_alloc(
        S->getIslCtx(),
        S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
    auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx()));
    auto *Space = S->getParamSpace();

    for (int I = 0, E = S->getNumParams(); I < E; ++I) {
      isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, I);
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    for (auto &Array : S->arrays()) {
      auto Id = Array->getBasePtrId();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    isl_space_free(Space);
    isl_ast_expr_free(Zero);

    return Names;
  }

  /// Create a new PPCG scop from the current scop.
  ///
  /// The PPCG scop is initialized with data from the current polly::Scop. From
  /// this initial data, the data-dependences in the PPCG scop are initialized.
  /// We do not use Polly's dependence analysis for now, to ensure we match
  /// the PPCG default behavior more closely.
  ///
  /// @returns A new ppcg scop.
  ppcg_scop *createPPCGScop() {
    auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));

    PPCGScop->options = createPPCGOptions();

    PPCGScop->start = 0;
    PPCGScop->end = 0;

    PPCGScop->context = S->getContext();
    PPCGScop->domain = S->getDomains();
    PPCGScop->call = nullptr;
    PPCGScop->tagged_reads = getTaggedReads();
    PPCGScop->reads = S->getReads();
    PPCGScop->live_in = nullptr;
    PPCGScop->tagged_may_writes = getTaggedMayWrites();
    PPCGScop->may_writes = S->getWrites();
    PPCGScop->tagged_must_writes = getTaggedMustWrites();
    PPCGScop->must_writes = S->getMustWrites();
    PPCGScop->live_out = nullptr;
    PPCGScop->tagged_must_kills = isl_union_map_empty(S->getParamSpace());
    PPCGScop->tagger = nullptr;

    PPCGScop->independence = nullptr;
    PPCGScop->dep_flow = nullptr;
    PPCGScop->tagged_dep_flow = nullptr;
    PPCGScop->dep_false = nullptr;
    PPCGScop->dep_forced = nullptr;
    PPCGScop->dep_order = nullptr;
    PPCGScop->tagged_dep_order = nullptr;

    PPCGScop->schedule = S->getScheduleTree();
    PPCGScop->names = getNames();

    PPCGScop->pet = nullptr;

    compute_tagger(PPCGScop);
    compute_dependences(PPCGScop);

    return PPCGScop;
  }

  /// Collect the array accesses in a statement.
  ///
  /// @param Stmt The statement for which to collect the accesses.
  ///
  /// @returns A list of array accesses.
  gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
    gpu_stmt_access *Accesses = nullptr;

    for (MemoryAccess *Acc : Stmt) {
      auto Access = isl_alloc_type(S->getIslCtx(), struct gpu_stmt_access);
      Access->read = Acc->isRead();
      Access->write = Acc->isWrite();
      Access->access = Acc->getAccessRelation();
      isl_space *Space = isl_map_get_space(Access->access);
      Space = isl_space_range(Space);
      Space = isl_space_from_range(Space);
      Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId());
      isl_map *Universe = isl_map_universe(Space);
      Access->tagged_access =
          isl_map_domain_product(Acc->getAccessRelation(), Universe);
      Access->exact_write = !Acc->isMayWrite();
      Access->ref_id = Acc->getId();
      Access->next = Accesses;
      Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
      Accesses = Access;
    }

    return Accesses;
  }

  /// Collect the list of GPU statements.
  ///
  /// Each statement has an id, a pointer to the underlying data structure,
  /// as well as a list with all memory accesses.
  ///
  /// @returns A linked-list of statements.
  gpu_stmt *getStatements() {
    gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx(), struct gpu_stmt,
                                       std::distance(S->begin(), S->end()));

    int i = 0;
    for (auto &Stmt : *S) {
      gpu_stmt *GPUStmt = &Stmts[i];

      GPUStmt->id = Stmt.getDomainId();

      // We use the pet stmt pointer to keep track of the Polly statements.
      GPUStmt->stmt = (pet_stmt *)&Stmt;
      GPUStmt->accesses = getStmtAccesses(Stmt);
      i++;
    }

    return Stmts;
  }

  /// Derive the extent of an array.
  ///
  /// The extent of an array is the set of elements that are within the
  /// accessed array. For the inner dimensions, the extent constraints are
  /// 0 and the size of the corresponding array dimension. For the first
  /// (outermost) dimension, the extent constraints are the minimal and
  /// maximal subscript value for the first dimension.
  ///
  /// @param Array The array to derive the extent for.
  ///
  /// @returns An isl_set describing the extent of the array.
  __isl_give isl_set *getExtent(ScopArrayInfo *Array) {
    unsigned NumDims = Array->getNumberOfDimensions();
    isl_union_map *Accesses = S->getAccesses();
    Accesses = isl_union_map_intersect_domain(Accesses, S->getDomains());
    Accesses = isl_union_map_detect_equalities(Accesses);
    isl_union_set *AccessUSet = isl_union_map_range(Accesses);
    AccessUSet = isl_union_set_coalesce(AccessUSet);
    AccessUSet = isl_union_set_detect_equalities(AccessUSet);
    AccessUSet = isl_union_set_coalesce(AccessUSet);

    if (isl_union_set_is_empty(AccessUSet)) {
      isl_union_set_free(AccessUSet);
      return isl_set_empty(Array->getSpace());
    }

    if (Array->getNumberOfDimensions() == 0) {
      isl_union_set_free(AccessUSet);
      return isl_set_universe(Array->getSpace());
    }

    isl_set *AccessSet =
        isl_union_set_extract_set(AccessUSet, Array->getSpace());

    isl_union_set_free(AccessUSet);
    isl_local_space *LS = isl_local_space_from_space(Array->getSpace());

    isl_pw_aff *Val =
        isl_pw_aff_from_aff(isl_aff_var_on_domain(LS, isl_dim_set, 0));

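    // Lift OuterMin/OuterMax from functions over the parameters to functions
    // over the array space, so they can be compared against the first array
    // dimension below.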
    isl_pw_aff *OuterMin = isl_set_dim_min(isl_set_copy(AccessSet), 0);
    isl_pw_aff *OuterMax = isl_set_dim_max(AccessSet, 0);
    OuterMin = isl_pw_aff_add_dims(OuterMin, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMax = isl_pw_aff_add_dims(OuterMax, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMin =
        isl_pw_aff_set_tuple_id(OuterMin, isl_dim_in, Array->getBasePtrId());
    OuterMax =
        isl_pw_aff_set_tuple_id(OuterMax, isl_dim_in, Array->getBasePtrId());

    isl_set *Extent = isl_set_universe(Array->getSpace());

    Extent = isl_set_intersect(
        Extent, isl_pw_aff_le_set(OuterMin, isl_pw_aff_copy(Val)));
    Extent = isl_set_intersect(Extent, isl_pw_aff_ge_set(OuterMax, Val));

    for (unsigned i = 1; i < NumDims; ++i)
      Extent = isl_set_lower_bound_si(Extent, isl_dim_set, i, 0);

    for (unsigned i = 0; i < NumDims; ++i) {
      isl_pw_aff *PwAff =
          const_cast<isl_pw_aff *>(Array->getDimensionSizePw(i));

      // The size of the outermost dimension is in general unknown, so its
      // isl_pw_aff may be NULL. Only Fortran arrays carry an explicit size
      // for dimension zero.
      if (!PwAff) {
        assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
        continue;
      }

      isl_pw_aff *Val = isl_pw_aff_from_aff(isl_aff_var_on_domain(
          isl_local_space_from_space(Array->getSpace()), isl_dim_set, i));
      PwAff = isl_pw_aff_add_dims(PwAff, isl_dim_in,
                                  isl_pw_aff_dim(Val, isl_dim_in));
      PwAff = isl_pw_aff_set_tuple_id(PwAff, isl_dim_in,
                                      isl_pw_aff_get_tuple_id(Val, isl_dim_in));
      auto *Set = isl_pw_aff_gt_set(PwAff, Val);
      Extent = isl_set_intersect(Set, Extent);
    }

    return Extent;
  }

  /// Derive the bounds of an array.
  ///
  /// For the first dimension we derive the bound of the array from the extent
  /// of this dimension. For inner dimensions we obtain their size directly
  /// from ScopArrayInfo.
  ///
  /// @param PPCGArray The array to compute bounds for.
  /// @param Array The polly array from which to take the information.
  void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
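    // The bound of the outermost dimension is the maximal subscript occurring
    // in the extent plus one; an empty extent yields a bound of zero.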
    if (PPCGArray.n_index > 0) {
      if (isl_set_is_empty(PPCGArray.extent)) {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        isl_local_space *LS = isl_local_space_from_space(
            isl_space_params(isl_set_get_space(Dom)));
        isl_set_free(Dom);
        isl_aff *Zero = isl_aff_zero_on_domain(LS);
        PPCGArray.bound[0] = isl_pw_aff_from_aff(Zero);
      } else {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
        isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
        isl_set_free(Dom);
        Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
        isl_local_space *LS =
            isl_local_space_from_space(isl_set_get_space(Dom));
        isl_aff *One = isl_aff_zero_on_domain(LS);
        One = isl_aff_add_constant_si(One, 1);
        Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
        Bound = isl_pw_aff_gist(Bound, S->getContext());
        PPCGArray.bound[0] = Bound;
      }
    }

    for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
      isl_pw_aff *Bound = Array->getDimensionSizePw(i);
      auto LS = isl_pw_aff_get_domain_space(Bound);
      auto Aff = isl_multi_aff_zero(LS);
      Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
      PPCGArray.bound[i] = Bound;
    }
  }

  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg The program to compute the arrays for.
  void createArrays(gpu_prog *PPCGProg) {
    int i = 0;
    for (auto &Array : S->arrays()) {
      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = Array->getElementType()->getPrimitiveSizeInBits() / 8;
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.bound =
          isl_alloc_array(S->getIslCtx(), isl_pw_aff *, PPCGArray.n_index);
      PPCGArray.extent = getExtent(Array);
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar =
          Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;
      PPCGArray.user = Array;

      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
    }
  }

  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace());

    for (auto &Array : S->arrays()) {
      isl_space *Space = Array->getSpace();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }

  /// Create a default-initialized PPCG GPU program.
  ///
  /// @returns A new gpu program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    if (!PPCGScop)
      return nullptr;

    auto PPCGProg = isl_calloc_type(S->getIslCtx(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
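    // Polly does not model accesses to sub-elements of structured types, so
    // the inner and outer views of every array coincide and identity maps
    // suffice for to_inner and to_outer.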
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    PPCGProg->any_to_outer = nullptr;
    PPCGProg->array_order = nullptr;
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();
    PPCGProg->n_array = std::distance(S->array_begin(), S->array_end());
    PPCGProg->array = isl_calloc_array(S->getIslCtx(), struct gpu_array_info,
                                       PPCGProg->n_array);

    createArrays(PPCGProg);

    PPCGProg->may_persist = compute_may_persist(PPCGProg);

    return PPCGProg;
  }

  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P The printer to print to
  /// @param Options The printing options to use
  /// @param Node The node to print
  /// @param User A user pointer to carry additional data. This pointer is
  ///             expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves, as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);
        isl_id_free(Id);
        isl_ast_print_options_free(Options);
        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }

  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);
  }

  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree An AST describing GPU code
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("# host\n");
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      printf("# kernel%d\n", Kernel->id);
      printKernel(Kernel);
    }
  }

  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute a new schedule for the program.
  //  2) Map the schedule to the GPU.
  //  3) Generate code for the new schedule.
  //
  // We do not use the Polly ScheduleOptimizer here, as the schedule optimizer
  // is mostly CPU specific. Instead, we use PPCG's GPU code generation
  // strategy directly from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set the scheduling strategy to the same strategy that PPCG uses.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

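    // Only map the schedule to the device if it contains at least one
    // permutable band that can be turned into GPU loops; a negative value
    // signals an isl error.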
    int HasPermutable = has_any_permutable_node(Schedule);

    if (HasPermutable < 0 || !HasPermutable) {
      Schedule = isl_schedule_free(Schedule);
    } else {
      Schedule = map_to_device(PPCGGen, Schedule);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      printf("%s\n", isl_printer_get_str(P));
      isl_printer_free(P);
    }

    if (DumpCode) {
      printf("Code\n");
      printf("====\n");
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        printf("No code generated\n");
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }

  /// Free a gpu_gen structure.
  ///
  /// @param PPCGGen The gpu_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us. To avoid leaks we do this
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }

  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the number
  /// of points in an isl set through the rectangular hull surrounding this
  /// set.
  ///
  /// @param Set   The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

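    // The volume of the rectangular hull: multiply up (max_i - min_i + 1)
    // over all set dimensions.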
    for (long i = 0; i < isl_set_dim(Set, isl_dim_set); i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }

  /// Approximate the number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt  The statement for which to compute the number of dynamic
  ///              instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }

  /// Approximate the number of dynamic instructions executed in the scop.
  ///
  /// @param S     The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }

  /// Create a check that ensures sufficient compute in the scop.
  ///
  /// @param S     The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }

  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction())) {
        continue;
      }

      for (Value *SrcVal : Inst.operands()) {
        PointerType *PTy = dyn_cast<PointerType>(SrcVal->getType());
        if (!PTy)
          continue;
        if (isa<FunctionType>(PTy->getElementType()))
          return true;
      }
    }
    return false;
  }

  /// Return whether the Scop S uses functions in a way that we do not support.
  bool containsInvalidKernelFunction(const Scop &S) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock()))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB))
            return true;
      }
    }
    return false;
  }

  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator);

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important as the conditional
    // branch will guard the original scop from new induction variables that
    // the SCEVExpander may introduce while code generating the parameters and
    // which may introduce scalar dependences that prevent us from correctly
    // code generating this scop.
    BBPair StartExitBlocks =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());
    NodeBuilder.addParameters(S->getContext());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);

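    // executeScopConditionally above created the branch with a placeholder
    // 'true' condition; substitute the real run-time check now that the
    // parameters are available.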
    Value *RTC = NodeBuilder.createRTC(Condition);
    Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

    Builder.SetInsertPoint(&*StartBlock->begin());

    NodeBuilder.initializeAfterRTH();
    NodeBuilder.create(Root);
    NodeBuilder.finalize();

    // In case a sequential kernel has more surrounding loops than any
    // parallel kernel, the SCoP is probably mostly sequential. Hence, there
    // is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      SplitBlock->getTerminator()->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      SplitBlock->getTerminator()->setOperand(0, Builder.getFalse());
  }

  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation would need to offload function calls to the
    // kernel. This may lead to a kernel trying to call a function on the host.
    // This also allows us to prevent codegen from trying to take the
    // address of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop)) {
      DEBUG(dbgs() << "Scop contains a function that cannot be materialized "
                      "in a GPU kernel. Bailing out.\n");
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree)
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }

  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<ScopDetectionWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<SCEVAAWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    //        region tree.
    AU.addPreserved<RegionInfoPass>();
    AU.addPreserved<ScopInfoRegionPass>();
  }
};
} // namespace

char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)