//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#include "isl/union_map.h"

extern "C" {
#include "ppcg/cuda.h"
#include "ppcg/gpu.h"
#include "ppcg/gpu_print.h"
#include "ppcg/ppcg.h"
#include "ppcg/schedule.h"
}

#include "llvm/Support/Debug.h"

using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> ManagedMemory("polly-acc-codegen-managed-memory",
                                   cl::desc("Generate host kernel code assuming"
                                            " that all memory has been"
                                            " declared as managed memory"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU"
                                       " kernel module."),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));

/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`,
  /// which merges schedules in *arbitrary* order.
  /// (We do not care about the order of the kills anyway.)
  isl::schedule KillsSchedule;

  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///   1. PHI nodes. PHI nodes are not alive outside the scop and can
  ///      consequently all be killed.
  ///   2. Scalar arrays that are not used outside the scop. This is
  ///      checked by `isScalarUsesContainedInScop`.
  ///
  /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule(nullptr) {}
};

/// Check if the uses of @p SAI are entirely contained within Scop @p S.
///
/// If a scalar is used only within a Scop, we are free to kill it, as no data
/// can flow in/out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays requires alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    if (!R.contains(I))
      return false;
  }
  return true;
}
/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @param S The Scop to compute live range reordering information for.
/// @returns live range reordering information that can be used to setup
///          PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace(isl::manage(S.getParamSpace()));
  MustKillsInfo Info;

  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop.
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(isl::space(ParamSpace));
  Info.MustKills = isl::union_map::empty(isl::space(ParamSpace));

  // Initializing KillsSchedule to `isl_set_empty` creates an empty node in
  // the schedule:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = nullptr;

  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);

    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //      [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    //    To construct this, we use `isl_map_domain_product` on 2 maps:
    //      2a. StmtToScalar:
    //            [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    //      2b. PhantomRefToScalar:
    //            [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    //    Combining these with `isl_map_domain_product` gives us
    //    TaggedMustKill:
    //      [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(isl::space(ParamSpace));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));

    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(isl::space(ParamSpace));
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();

    // 3. Create the kill schedule of the form:
    //      "[param] -> { Stmt_phantom[] }"
    //    Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (Info.KillsSchedule)
      Info.KillsSchedule = Info.KillsSchedule.set(KillSchedule);
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}
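// For illustration only (not part of the build): assuming a scop with one
// parameter `n` and a single killable scalar whose base pointer id is named
// `MemRef_val` (a hypothetical name), the code above produces objects that
// print roughly as follows in isl notation:
//
//   TaggedMustKills:
//     [n] -> { [SKill_phantom_MemRef_val[] -> ref_phantomMemRef_val[]]
//              -> MemRef_val[] }
//   MustKills:
//     [n] -> { SKill_phantom_MemRef_val[] -> MemRef_val[] }
//   KillsSchedule: a schedule tree consisting of a single domain node over
//     [n] -> { SKill_phantom_MemRef_val[] }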
/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;

  isl::ast_build Build = isl::manage(isl_ast_build_copy(Build_C));
  isl::ctx Ctx = Build.get_ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(isl::manage(Stmt->getDomain()));

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}

/// Given an LLVM Type, compute its size in bytes.
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}
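// For illustration only (not part of the build): some sample results for
// common LLVM types, assuming an LLVMContext `Ctx`:
//
//   computeSizeInBytes(Type::getInt32Ty(Ctx));  // == 4
//   computeSizeInBytes(Type::getFloatTy(Ctx));  // == 4
//   computeSizeInBytes(Type::getDoubleTy(Ctx)); // == 8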
/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  virtual void finalize();

  /// Track if the full build process was successful.
  ///
  /// This value is set to false, if throughout the build process an error
  /// occurred which prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was
  /// created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed
  /// any more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel.
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  class IslIdDeleter {
  public:
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;

  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  virtual void createUser(__isl_take isl_ast_node *UserStmt);

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);

  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values.
  ///
  /// @returns A pair, whose first element contains the set of values
  ///          referenced by the kernel, and whose second element contains the
  ///          set of functions referenced by the kernel. All functions in the
  ///          second set satisfy isValidFunctionInKernel.
  std::pair<SetVector<Value *>, SetVector<Function *>>
  getReferencesInKernel(ppcg_kernel *Kernel);

  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for the X and Y dimension.
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Creates an array that can be sent to the kernel on the device using a
  /// host pointer. This is required for managed memory, when we directly send
  /// host pointers to the device.
  /// \note
  /// This is to be used only with managed memory.
  Value *getOrCreateManagedDeviceArray(gpu_array_info *Array,
                                       ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);
  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variables.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);

  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);

  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is
  /// accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);

  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this
  ///                         kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);
  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);

  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param Kernel The kernel to generate the function calls for.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  ///    SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  ///    old functions (that come from the original module) to new functions
  ///    (that are created within GPUModule). That way, we generate references
  ///    to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);

  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();
  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested
  /// -- dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();

  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation.
  ///
  /// @returns The device pointer corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate.
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  /// @param Size      The number of bytes to copy.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  /// @param Size      The number of bytes to copy.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  /// \note
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object.
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);
  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param BlockDimX  The size of the first block dimension.
  /// @param BlockDimY  The size of the second block dimension.
  /// @param BlockDimZ  The size of the third block dimension.
  /// @param Parameters A pointer to an array of pointers to the parameter
  ///                   values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};

std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}

void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!ManagedMemory)
    allocateDeviceArrays();
}

void GPUNodeBuilder::finalize() {
  if (!ManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}

void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!ManagedMemory && "Managed memory will directly send host pointers "
                           "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}

void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (auto &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };
    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}
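// For illustration only (not part of the build): for a hypothetical kernel
// FUNC_foo_SCOP_0_KERNEL_0 launched with a 32x16x1 block, the loop above
// emits named metadata that prints roughly as:
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @FUNC_foo_SCOP_0_KERNEL_0, !"maxntidx", i32 32,
//          !"maxntidy", i32 16, !"maxntidz", i32 1}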
void GPUNodeBuilder::freeDeviceArrays() {
  assert(!ManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}

Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}

Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}

void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}
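// For illustration only (not part of the build): taken together, the helpers
// above let createKernel() emit host code that corresponds roughly to the
// following pseudo-C sequence (the polly_* entry points are provided by
// Polly's GPU runtime library):
//
//   void *Kernel = polly_getKernel(KernelString, KernelName);
//   polly_launchKernel(Kernel, GridDimX, GridDimY,
//                      BlockDimX, BlockDimY, BlockDimZ, Parameters);
//   polly_freeKernel(Kernel);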
void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}

void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!ManagedMemory && "Managed memory does not allocate or free memory "
                           "for device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}

Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!ManagedMemory && "Managed memory does not allocate or free memory "
                           "for device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}

void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!ManagedMemory && "Managed memory does not transfer memory between "
                           "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}

void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!ManagedMemory && "Managed memory does not transfer memory between "
                           "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}
void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(ManagedMemory && "explicit synchronization is only necessary for "
                          "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}

Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}

void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}

/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}

Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build =
      isl::ast_build::from_context(isl::manage(S.getContext()));
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound =
        isl::manage(isl_multi_pw_aff_copy(Array->bound));

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }
  return ArraySize;
}
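// For illustration only (not part of the build): for a hypothetical
// two-dimensional array of doubles with bounds n and 128, Array->size is the
// element size 8, so getArraySize() emits code that computes
//
//   ArraySize = 8 * (n * 128)
//
// i.e. the total size of the accessed allocation in bytes.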
Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
  if (gpu_array_is_scalar(Array))
    return nullptr;

  isl_ast_build *Build = isl_ast_build_from_context(S.getContext());

  isl_set *Min = isl_set_lexmin(isl_set_copy(Array->extent));

  isl_set *ZeroSet = isl_set_universe(isl_set_get_space(Min));

  for (long i = 0; i < isl_set_dim(Min, isl_dim_set); i++)
    ZeroSet = isl_set_fix_si(ZeroSet, isl_dim_set, i, 0);

  if (isl_set_is_subset(Min, ZeroSet)) {
    isl_set_free(Min);
    isl_set_free(ZeroSet);
    isl_ast_build_free(Build);
    return nullptr;
  }
  isl_set_free(ZeroSet);

  isl_ast_expr *Result =
      isl_ast_expr_from_val(isl_val_int_from_si(isl_set_get_ctx(Min), 0));

  for (long i = 0; i < isl_set_dim(Min, isl_dim_set); i++) {
    if (i > 0) {
      isl_pw_aff *Bound_I = isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1);
      isl_ast_expr *BExpr = isl_ast_build_expr_from_pw_aff(Build, Bound_I);
      Result = isl_ast_expr_mul(Result, BExpr);
    }
    isl_pw_aff *DimMin = isl_set_dim_min(isl_set_copy(Min), i);
    isl_ast_expr *MExpr = isl_ast_build_expr_from_pw_aff(Build, DimMin);
    Result = isl_ast_expr_add(Result, MExpr);
  }

  Value *ResultValue = ExprBuilder.create(Result);
  isl_set_free(Min);
  isl_ast_build_free(Build);

  return ResultValue;
}

Value *GPUNodeBuilder::getOrCreateManagedDeviceArray(gpu_array_info *Array,
                                                     ScopArrayInfo *ArrayInfo) {

  assert(ManagedMemory && "Only used when you wish to get a host "
                          "pointer for sending data to the kernel, "
                          "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  if ((it = DeviceAllocations.find(ArrayInfo)) != DeviceAllocations.end()) {
    return it->second;
  } else {
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ArrayInfo);
    else
      HostPtr = ArrayInfo->getBasePtr();

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ArrayInfo->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ArrayInfo] = HostPtr;
    return HostPtr;
  }
}

void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!ManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}
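// For illustration only (not part of the build): reusing the example from
// getArrayOffset(), for a double array A that is only accessed as A[i + 42]
// with 0 <= i < 100, the transfer above starts at &A[42] rather than &A[0]
// and shrinks the byte count accordingly:
//
//   Size    = getArraySize(A) - 42 * 8; // skip 42 unused leading elements
//   HostPtr = (char *)(&A[42]);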
void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  // Read the name before dropping our reference to the id; the id stays
  // alive through the reference held by Expr.
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!ManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!ManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      createCallSynchronizeDevice();
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
  return;
}

void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}
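// For illustration only (not part of the build): the two branches above
// generate the moral equivalent of
//
//   LocalArr[local_index] = GlobalArr[index];  // u.c.read
//   GlobalArr[index] = LocalArr[local_index];  // !u.c.read
//
// where both index expressions are supplied by PPCG through u.c.index and
// u.c.local_index.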
void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
                                    ppcg_kernel_stmt *KernelStmt) {
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;

  LoopToScevMapT LTS;
  LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());

  createSubstitutions(Expr, Stmt, LTS);

  if (Stmt->isBlockStmt())
    BlockGen.copyStmt(*Stmt, LTS, Indexes);
  else
    RegionGen.copyStmt(*Stmt, LTS, Indexes);
}

void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  const char *SpirName = "__gen_ocl_barrier_global";

  Function *Sync;

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    Sync = M->getFunction(SpirName);

    // If Sync is not available, declare it.
    if (!Sync) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
      Sync = Function::Create(Ty, Linkage, SpirName, M);
      Sync->setCallingConv(CallingConv::SPIR_FUNC);
    }
    break;
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}

/// Collect llvm::Values referenced from @p Node.
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}

/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp",  "expf",  "expl",     "cos",       "cosf",
    "sqrt", "sqrtf", "copysign", "copysignf", "copysignl"};

/// Return the corresponding CUDA libdevice function name for @p F.
///
/// Return "" if @p F is not a function known to be in libdevice.
std::string getCUDALibDeviceFunction(Function *F) {
  if (CUDALibDeviceFunctions.count(F->getName()))
    return std::string("__nv_") + std::string(F->getName());

  return "";
}
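// For illustration only (not part of the build): getCUDALibDeviceFunction
// maps, e.g., "expf" to "__nv_expf" and "copysign" to "__nv_copysign"; for
// any function not listed in CUDALibDeviceFunctions it returns "".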
/// Check if F is a function that we can code-generate in a GPU kernel.
static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
  assert(F && "F is an invalid pointer");
  // We string compare against the name of the function to allow
  // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
  // "llvm.copysign".
  const StringRef Name = F->getName();

  if (AllowLibDevice && getCUDALibDeviceFunction(F).length() > 0)
    return true;

  return F->isIntrinsic() &&
         (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
          Name.startswith("llvm.copysign"));
}

/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking the address of any function and
/// trying to pass it along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }

/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
                                 bool AllowCUDALibDevice) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
             "Code should have bailed out by this point if an invalid "
             "function were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}
std::pair<SetVector<Value *>, SetVector<Function *>>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator()};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs)
    findValues(Expr, SE, SubtreeValues);

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace();
  for (long i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  isl_space_free(Space);

  for (long i = 0; i < isl_space_dim(Kernel->space, isl_dim_set); i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // ValidSubtree{Values, Functions} sets. Nor should the sets
  // ValidSubtree{Values, Functions} have any common element.
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());

  bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;

  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));

  // @see IslNodeBuilder::getReferencesInSubtree
  SetVector<Value *> ReplacedValues;
  for (Value *V : ValidSubtreeValues) {
    auto It = ValueMap.find(V);
    if (It == ValueMap.end())
      ReplacedValues.insert(V);
    else
      ReplacedValues.insert(It->second);
  }
  return std::make_pair(ReplacedValues, ValidSubtreeFunctions);
}

void GPUNodeBuilder::clearDominators(Function *F) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (BasicBlock *BB : Nodes)
    DT.eraseNode(BB);
}

void GPUNodeBuilder::clearScalarEvolution(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
  }
}

void GPUNodeBuilder::clearLoops(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
    LI.removeBlock(&BB);
  }
}

std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;
  isl_ast_build *Context = isl_ast_build_from_context(S.getContext());

  for (long i = 0; i < Kernel->n_grid; i++) {
    isl_pw_aff *Size = isl_multi_pw_aff_get_pw_aff(Kernel->grid_size, i);
    isl_ast_expr *GridSize = isl_ast_build_expr_from_pw_aff(Context, Size);
    Value *Res = ExprBuilder.create(GridSize);
    Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    Sizes.push_back(Res);
  }
  isl_ast_build_free(Context);

  for (long i = Kernel->n_grid; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1]);
}

std::tuple<Value *, Value *, Value *>
GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;

  for (long i = 0; i < Kernel->n_block; i++) {
    Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_block; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
}

void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
                                          Instruction *Param, int Index) {
  Value *Slot = Builder.CreateGEP(
      Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
  Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
  Builder.CreateStore(ParamTyped, Slot);
}
Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  Type *ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);

  BasicBlock *EntryBlock =
      &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
  std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
  Instruction *Parameters = new AllocaInst(
      ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());

  int Index = 0;
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));

    ArgSizes[Index] = SAI->getElemSizeInBytes();

    Value *DevArray = nullptr;
    if (ManagedMemory) {
      DevArray = getOrCreateManagedDeviceArray(
          &Prog->array[i], const_cast<ScopArrayInfo *>(SAI));
    } else {
      DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
      DevArray = createCallGetDevicePtr(DevArray);
    }
    assert(DevArray != nullptr && "Array to be offloaded to device not "
                                  "initialized");
    Value *Offset = getArrayOffset(&Prog->array[i]);

    if (Offset) {
      DevArray = Builder.CreatePointerCast(
          DevArray, SAI->getElementType()->getPointerTo());
      DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
      DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
    }
    Value *Slot = Builder.CreateGEP(
        Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Value *ValPtr = nullptr;
      if (ManagedMemory)
        ValPtr = DevArray;
      else
        ValPtr = BlockGen.getOrCreateAlloca(SAI);

      assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
                                  " to be stored into Parameters");
      Value *ValPtrCast =
          Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
      Builder.CreateStore(ValPtrCast, Slot);
    } else {
      Instruction *Param =
          new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
                         Launch + "_param_" + std::to_string(Index),
                         EntryBlock->getTerminator());
      Builder.CreateStore(DevArray, Param);
      Value *ParamTyped =
          Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
      Builder.CreateStore(ParamTyped, Slot);
    }
    Index++;
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    if (ValueMap.count(Val))
      Val = ValueMap[Val];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (auto Val : SubtreeValues) {
    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (int i = 0; i < NumArgs; i++) {
    Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
    Instruction *Param =
        new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                       Launch + "_param_size_" + std::to_string(i),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  auto Location = EntryBlock->getTerminator();
  return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
                         Launch + "_params_i8ptr", Location);
}
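// For illustration only (not part of the build): the parameter array built
// above has 2 * NumArgs i8* slots. For a hypothetical kernel with two
// arguments the layout is:
//
//   Slot 0: pointer to argument 0 (e.g. a device array pointer)
//   Slot 1: pointer to argument 1 (e.g. a host iterator value)
//   Slot 2: pointer to an i32 holding the size recorded for argument 0
//           (the element size for array arguments, the value size otherwise)
//   Slot 3: pointer to an i32 holding the size recorded for argument 1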
  for (auto Val : SubtreeValues) {
    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (int i = 0; i < NumArgs; i++) {
    Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
    Instruction *Param =
        new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                       Launch + "_param_size_" + std::to_string(i),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  auto Location = EntryBlock->getTerminator();
  return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
                         Launch + "_params_i8ptr", Location);
}

/// Make the functions referenced from the kernel available inside the GPU
/// module by creating matching external declarations and mapping the host
/// functions to them.
void GPUNodeBuilder::setupKernelSubtreeFunctions(
    SetVector<Function *> SubtreeFunctions) {
  for (auto Fn : SubtreeFunctions) {
    const std::string ClonedFnName = Fn->getName();
    Function *Clone = GPUModule->getFunction(ClonedFnName);
    if (!Clone)
      Clone =
          Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
                           ClonedFnName, GPUModule.get());
    assert(Clone && "Expected cloned function to be initialized.");
    assert(ValueMap.find(Fn) == ValueMap.end() &&
           "Fn already present in ValueMap");
    ValueMap[Fn] = Clone;
  }
}
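
/// Generate code for the GPU kernel rooted at @p KernelStmt.
///
/// The device code is built in a fresh LLVM-IR module and compiled to an
/// assembly string, while the host state (value maps, insert point) is saved
/// and restored around it. On the host side, the launch parameters are
/// created and the calls that load, launch and free the kernel are emitted.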
void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
  isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
  ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
  isl_id_free(Id);
  isl_ast_node_free(KernelStmt);

  if (Kernel->n_grid > 1)
    DeepestParallel =
        std::max(DeepestParallel, isl_space_dim(Kernel->space, isl_dim_set));
  else
    DeepestSequential =
        std::max(DeepestSequential, isl_space_dim(Kernel->space, isl_dim_set));

  Value *BlockDimX, *BlockDimY, *BlockDimZ;
  std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);

  SetVector<Value *> SubtreeValues;
  SetVector<Function *> SubtreeFunctions;
  std::tie(SubtreeValues, SubtreeFunctions) = getReferencesInKernel(Kernel);

  assert(Kernel->tree && "Device AST of kernel node is empty");

  Instruction &HostInsertPoint = *Builder.GetInsertPoint();
  IslExprBuilder::IDToValueTy HostIDs = IDToValue;
  ValueMapT HostValueMap = ValueMap;
  BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
  ScalarMap.clear();

  SetVector<const Loop *> Loops;

  // For all loops we depend on, create values that contain the current loop
  // iteration. These values are necessary to generate code for SCEVs that
  // depend on such loops. As a result we need to pass them to the subfunction.
  for (const Loop *L : Loops) {
    const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
                                            SE.getUnknown(Builder.getInt64(1)),
                                            L, SCEV::FlagAnyWrap);
    Value *V = generateSCEV(OuterLIV);
    OutsideLoopIterations[L] = SE.getUnknown(V);
    SubtreeValues.insert(V);
  }

  createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
  setupKernelSubtreeFunctions(SubtreeFunctions);

  create(isl_ast_node_copy(Kernel->tree));

  finalizeKernelArguments(Kernel);
  Function *F = Builder.GetInsertBlock()->getParent();
  if (Arch == GPUArch::NVPTX64)
    addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
  clearDominators(F);
  clearScalarEvolution(F);
  clearLoops(F);

  IDToValue = HostIDs;

  ValueMap = std::move(HostValueMap);
  ScalarMap = std::move(HostScalarMap);
  EscapeMap.clear();
  IDToSAI.clear();
  Annotator.resetAlternativeAliasBases();
  for (auto &BasePtr : LocalArrays)
    S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
  LocalArrays.clear();

  std::string ASMString = finalizeKernelFunction();
  Builder.SetInsertPoint(&HostInsertPoint);
  Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);

  std::string Name = getKernelFuncName(Kernel->id);
  Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
  Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
  Value *GPUKernel = createCallGetKernel(KernelString, NameString);

  Value *GridDimX, *GridDimY;
  std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);

  createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters);
  createCallFreeKernel(GPUKernel);

  for (auto Id : KernelIds)
    isl_id_free(Id);

  KernelIds.clear();
}

/// Compute the DataLayout string for the NVPTX backend.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeNVPTXDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  }

  return Ret;
}
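
// For reference, the DataLayout strings built above and below use the
// standard LLVM encoding: "e" selects little-endian layout, "p:64:64:64"
// gives the pointer size, ABI alignment and preferred alignment in bits,
// "iN:A:P", "fN:A:P" and "vN:A:P" do the same for integer, float and vector
// types, and "n16:32:64" lists the natively supported integer widths.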

/// Compute the DataLayout string for a SPIR kernel.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeSPIRDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  }

  return Ret;
}

/// Create the declaration of the kernel function for @p Kernel and map all
/// isl ids and referenced host values to the corresponding kernel arguments.
Function *
GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
                                         SetVector<Value *> &SubtreeValues) {
  std::vector<Type *> Args;
  std::string Identifier = getKernelFuncName(Kernel->id);

  std::vector<Metadata *> MemoryType;

  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
      const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
      Args.push_back(SAI->getElementType());
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
    } else {
      static const int UseGlobalMemory = 1;
      Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
    }
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    Args.push_back(Builder.getInt64Ty());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);
    Args.push_back(Val->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  for (auto *V : SubtreeValues) {
    Args.push_back(V->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
  auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
                              GPUModule.get());

  std::vector<Metadata *> EmptyStrings;

  for (unsigned int i = 0; i < MemoryType.size(); i++) {
    EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
  }

  if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
    FN->setMetadata("kernel_arg_addr_space",
                    MDNode::get(FN->getContext(), MemoryType));
    FN->setMetadata("kernel_arg_name",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_access_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_base_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
  }

  switch (Arch) {
  case GPUArch::NVPTX64:
    FN->setCallingConv(CallingConv::PTX_Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    FN->setCallingConv(CallingConv::SPIR_KERNEL);
    break;
  }

  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Arg->setName(Kernel->array[i].array->name);

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    Type *EleTy = SAI->getElementType();
    Value *Val = &*Arg;
    SmallVector<const SCEV *, 4> Sizes;
    isl_ast_build *Build =
        isl_ast_build_from_context(isl_set_copy(Prog->context));
    Sizes.push_back(nullptr);
    for (long j = 1; j < Kernel->array[i].array->n_index; j++) {
      isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
          Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
      auto V = ExprBuilder.create(DimSize);
      Sizes.push_back(SE.getSCEV(V));
    }
    const ScopArrayInfo *SAIRep =
        S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
    LocalArrays.push_back(Val);

    isl_ast_build_free(Build);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAIRep;
    Arg++;
  }

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Arg->setName(isl_id_get_name(Id));
    Value *Val = IDToValue[Id];
    ValueMap[Val] = &*Arg;
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (auto *V : SubtreeValues) {
    Arg->setName(V->getName());
    ValueMap[V] = &*Arg;
    Arg++;
  }

  return FN;
}
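
/// Insert NVVM intrinsics that read the CUDA block and thread position
/// registers and bind the kernel's block and thread isl ids to the (zero
/// extended) results.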
void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[2];
  Intrinsic::ID IntrinsicsTID[3];

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
  case GPUArch::NVPTX64:
    IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
    IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;

    IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
    IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
    IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
    break;
  }

  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}

/// Bind the kernel's block and thread isl ids to calls to the OpenCL
/// work-item builtins used by the SPIR targets, declaring the builtins first
/// if necessary.
void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel) {
  const char *GroupName[3] = {"__gen_ocl_get_group_id0",
                              "__gen_ocl_get_group_id1",
                              "__gen_ocl_get_group_id2"};

  const char *LocalName[3] = {"__gen_ocl_get_local_id0",
                              "__gen_ocl_get_local_id1",
                              "__gen_ocl_get_local_id2"};

  auto createFunc = [this](const char *Name, __isl_take isl_id *Id) mutable {
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *FN = M->getFunction(Name);

    // If FN is not available, declare it.
    if (!FN) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getInt32Ty(), Args, false);
      FN = Function::Create(Ty, Linkage, Name, M);
      FN->setCallingConv(CallingConv::SPIR_FUNC);
    }

    Value *Val = Builder.CreateCall(FN, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i)
    createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i));

  for (int i = 0; i < Kernel->n_block; ++i)
    createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i));
}

/// Copy the scalar kernel arguments into the allocas from which the
/// generated kernel code reads them.
void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    Value *Val = &*Arg;

    if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Type *TypePtr = SAI->getElementType()->getPointerTo();
      Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
      Val = Builder.CreateLoad(TypedArgPtr);
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Builder.CreateStore(Val, Alloca);

    Arg++;
  }
}
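
/// Store back scalars that the kernel may have modified.
///
/// For every non read-only scalar argument, load the final value from the
/// alloca used inside the kernel and write it back through the argument
/// pointer. If such stores are emitted for a parallel kernel, the build is
/// marked as failed, as storing only at the end of the kernel is correct
/// for a single thread only.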
void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
  auto *FN = Builder.GetInsertBlock()->getParent();
  auto Arg = FN->arg_begin();

  bool StoredScalar = false;
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Arg++;
      continue;
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Value *ArgPtr = &*Arg;
    Type *TypePtr = SAI->getElementType()->getPointerTo();
    Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
    Value *Val = Builder.CreateLoad(Alloca);
    Builder.CreateStore(Val, TypedArgPtr);
    StoredScalar = true;

    Arg++;
  }

  if (StoredScalar)
    /// In case more than one thread contains scalar stores, the generated
    /// code might be incorrect if we only store at the end of the kernel.
    /// To support this case we need to store these scalars back at each
    /// memory store or at least before each kernel barrier.
    if (Kernel->n_block != 0 || Kernel->n_grid != 0)
      BuildSuccessful = 0;
}

/// Create the kernel-local buffers (shared or private memory) requested for
/// @p Kernel and register them as arrays of the scop.
void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  for (int i = 0; i < Kernel->n_var; ++i) {
    struct ppcg_kernel_var &Var = Kernel->var[i];
    isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
    Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();

    Type *ArrayTy = EleTy;
    SmallVector<const SCEV *, 4> Sizes;

    Sizes.push_back(nullptr);
    for (unsigned int j = 1; j < Var.array->n_index; ++j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
    }

    for (int j = Var.array->n_index - 1; j >= 0; --j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      ArrayTy = ArrayType::get(ArrayTy, Bound);
    }

    const ScopArrayInfo *SAI;
    Value *Allocation;
    if (Var.type == ppcg_access_shared) {
      auto GlobalVar = new GlobalVariable(
          *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
          nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
      GlobalVar->setAlignment(EleTy->getPrimitiveSizeInBits() / 8);
      GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));

      Allocation = GlobalVar;
    } else if (Var.type == ppcg_access_private) {
      Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
    } else {
      llvm_unreachable("unknown variable type");
    }
    SAI =
        S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
    Id = isl_id_alloc(S.getIslCtx(), Var.name, nullptr);
    IDToValue[Id] = Allocation;
    LocalArrays.push_back(Allocation);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAI;
  }
}
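
/// Set up a fresh GPU module for @p Kernel and create the kernel function in
/// it: the target triple and data layout are configured for the selected
/// architecture, the function declaration is created, scalar arguments are
/// copied into allocas, kernel-local variables are created, and the block
/// and thread ids are made available.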
void GPUNodeBuilder::createKernelFunction(
    ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
    SetVector<Function *> &SubtreeFunctions) {
  std::string Identifier = getKernelFuncName(Kernel->id);
  GPUModule.reset(new Module(Identifier, Builder.getContext()));

  switch (Arch) {
  case GPUArch::NVPTX64:
    if (Runtime == GPURuntime::CUDA)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    else if (Runtime == GPURuntime::OpenCL)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
    GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
    break;
  case GPUArch::SPIR32:
    GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
    break;
  case GPUArch::SPIR64:
    GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
    break;
  }

  Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);

  BasicBlock *PrevBlock = Builder.GetInsertBlock();
  auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);

  DT.addNewBlock(EntryBlock, PrevBlock);

  Builder.SetInsertPoint(EntryBlock);
  Builder.CreateRetVoid();
  Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());

  ScopDetection::markFunctionAsInvalid(FN);

  prepareKernelArguments(Kernel, FN);
  createKernelVariables(Kernel, FN);

  switch (Arch) {
  case GPUArch::NVPTX64:
    insertKernelIntrinsics(Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    insertKernelCallsSPIR(Kernel);
    break;
  }
}

/// Compile the kernel module to target assembly (PTX for the NVPTX targets)
/// or, for the SPIR targets, serialize the module itself as textual LLVM-IR.
std::string GPUNodeBuilder::createKernelASM() {
  llvm::Triple GPUTriple;

  switch (Arch) {
  case GPUArch::NVPTX64:
    switch (Runtime) {
    case GPURuntime::CUDA:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
      break;
    case GPURuntime::OpenCL:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
      break;
    }
    break;
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    std::string SPIRAssembly;
    raw_string_ostream IROstream(SPIRAssembly);
    IROstream << *GPUModule;
    IROstream.flush();
    return SPIRAssembly;
  }

  std::string ErrMsg;
  auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);

  if (!GPUTarget) {
    errs() << ErrMsg << "\n";
    return "";
  }

  TargetOptions Options;
  Options.UnsafeFPMath = FastMath;

  std::string subtarget;

  switch (Arch) {
  case GPUArch::NVPTX64:
    subtarget = CudaVersion;
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    llvm_unreachable("No subtarget for SPIR architecture");
  }

  std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
      GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));

  SmallString<0> ASMString;
  raw_svector_ostream ASMStream(ASMString);
  llvm::legacy::PassManager PM;

  PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));

  if (TargetM->addPassesToEmitFile(
          PM, ASMStream, TargetMachine::CGFT_AssemblyFile, true /* verify */)) {
    errs() << "The target does not support generation of this file type!\n";
    return "";
  }

  PM.run(*GPUModule);

  return ASMStream.str();
}

/// Check whether the GPU module uses functions that must be provided by
/// CUDA's libdevice, renaming matching declarations to their libdevice
/// names.
bool GPUNodeBuilder::requiresCUDALibDevice() {
  bool RequiresLibDevice = false;
  for (Function &F : GPUModule->functions()) {
    if (!F.isDeclaration())
      continue;

    std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(&F);
    if (CUDALibDeviceFunc.length() != 0) {
      F.setName(CUDALibDeviceFunc);
      RequiresLibDevice = true;
    }
  }

  return RequiresLibDevice;
}
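
/// Link CUDA's libdevice bitcode into the GPU module if any of the module's
/// function declarations map to libdevice functions. The bitcode file is
/// taken from the -polly-acc-libdevice option; a failure to load it aborts
/// the GPU code generation.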
void GPUNodeBuilder::addCUDALibDevice() {
  if (Arch != GPUArch::NVPTX64)
    return;

  if (requiresCUDALibDevice()) {
    SMDiagnostic Error;

    errs() << CUDALibDevice << "\n";
    auto LibDeviceModule =
        parseIRFile(CUDALibDevice, Error, GPUModule->getContext());

    if (!LibDeviceModule) {
      BuildSuccessful = false;
      report_fatal_error("Could not find or load libdevice. Skipping GPU "
                         "kernel generation. Please set -polly-acc-libdevice "
                         "accordingly.\n");
      return;
    }

    Linker L(*GPUModule);

    // Set an nvptx64 target triple to avoid linker warnings. The original
    // triple of the libdevice files is nvptx-unknown-unknown.
    LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
  }
}

/// Verify, optimize and compile the kernel module, returning the final
/// assembly string that is embedded into the host binary.
std::string GPUNodeBuilder::finalizeKernelFunction() {

  if (verifyModule(*GPUModule)) {
    DEBUG(dbgs() << "verifyModule failed on module:\n";
          GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
    DEBUG(dbgs() << "verifyModule Error:\n";
          verifyModule(*GPUModule, &dbgs()););

    if (FailOnVerifyModuleFailure)
      llvm_unreachable("VerifyModule failed.");

    BuildSuccessful = false;
    return "";
  }

  addCUDALibDevice();

  if (DumpKernelIR)
    outs() << *GPUModule << "\n";

  if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
    // Optimize module.
    llvm::legacy::PassManager OptPasses;
    PassManagerBuilder PassBuilder;
    PassBuilder.OptLevel = 3;
    PassBuilder.SizeLevel = 0;
    PassBuilder.populateModulePassManager(OptPasses);
    OptPasses.run(*GPUModule);
  }

  std::string Assembly = createKernelASM();

  if (DumpKernelASM)
    outs() << Assembly << "\n";

  GPUModule.release();
  KernelIDs.clear();

  return Assembly;
}

namespace {
class PPCGCodeGeneration : public ScopPass {
public:
  static char ID;

  GPURuntime Runtime = GPURuntime::CUDA;

  GPUArch Architecture = GPUArch::NVPTX64;

  /// The scop that is currently processed.
  Scop *S;

  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const DataLayout *DL;
  RegionInfo *RI;

  PPCGCodeGeneration() : ScopPass(ID) {}

  /// Construct compilation options for PPCG.
  ///
  /// @returns The compilation options.
  ppcg_options *createPPCGOptions() {
    auto DebugOptions =
        (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
    auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));

    DebugOptions->dump_schedule_constraints = false;
    DebugOptions->dump_schedule = false;
    DebugOptions->dump_final_schedule = false;
    DebugOptions->dump_sizes = false;
    DebugOptions->verbose = false;

    Options->debug = DebugOptions;

    Options->group_chains = false;
    Options->reschedule = true;
    Options->scale_tile_loops = false;
    Options->wrap = false;

    Options->non_negative_parameters = false;
    Options->ctx = nullptr;
    Options->sizes = nullptr;

    Options->tile = true;
    Options->tile_size = 32;

    Options->isolate_full_tiles = false;

    Options->use_private_memory = PrivateMemory;
    Options->use_shared_memory = SharedMemory;
    Options->max_shared_memory = 48 * 1024;

    Options->target = PPCG_TARGET_CUDA;
    Options->openmp = false;
    Options->linearize_device_arrays = true;
    Options->allow_gnu_extensions = false;

    Options->unroll_copy_shared = false;
    Options->unroll_gpu_tile = false;
    Options->live_range_reordering = true;

    Options->hybrid = false;
    Options->opencl_compiler_options = nullptr;
    Options->opencl_use_gpu = false;
    Options->opencl_n_include_file = 0;
    Options->opencl_include_files = nullptr;
    Options->opencl_print_kernel_types = false;
    Options->opencl_embed_kernel_code = false;

    Options->save_schedule_file = nullptr;
    Options->load_schedule_file = nullptr;

    return Options;
  }

  /// Get a tagged access relation containing all accesses of type @p AccessTy.
  ///
  /// Instead of a normal access of the form:
  ///
  /// Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// a tagged access has the form
  ///
  /// [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// where 'id' is an additional space that references the memory access that
  /// triggered the access.
  ///
  /// @param AccessTy The type of the memory accesses to collect.
  ///
  /// @return The relation describing all tagged memory accesses.
  isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
    isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace());

    for (auto &Stmt : *S)
      for (auto &Acc : Stmt)
        if (Acc->getType() == AccessTy) {
          isl_map *Relation = Acc->getAccessRelation().release();
          Relation = isl_map_intersect_domain(Relation, Stmt.getDomain());

          isl_space *Space = isl_map_get_space(Relation);
          Space = isl_space_range(Space);
          Space = isl_space_from_range(Space);
          Space =
              isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
          isl_map *Universe = isl_map_universe(Space);
          Relation = isl_map_domain_product(Relation, Universe);
          Accesses = isl_union_map_add_map(Accesses, Relation);
        }

    return Accesses;
  }

  /// Get the set of all read accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedReads() {
    return getTaggedAccesses(MemoryAccess::READ);
  }

  /// Get the set of all may (and must) write accesses, tagged with the
  /// access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMayWrites() {
    return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
                               getTaggedAccesses(MemoryAccess::MUST_WRITE));
  }

  /// Get the set of all must-write accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMustWrites() {
    return getTaggedAccesses(MemoryAccess::MUST_WRITE);
  }

  /// Collect parameter and array names as isl_ids.
  ///
  /// To reason about the different parameters and arrays used, ppcg requires
  /// a list of all isl_ids in use. As PPCG traditionally performs
  /// source-to-source compilation, each of these isl_ids is mapped to the
  /// expression that represents it. As we do not have a corresponding
  /// expression in Polly, we just map each id to a 'zero' expression to match
  /// the data format that ppcg expects.
  ///
  /// @returns A map from collected ids to 'zero' ast expressions.
  __isl_give isl_id_to_ast_expr *getNames() {
    auto *Names = isl_id_to_ast_expr_alloc(
        S->getIslCtx(),
        S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
    auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx()));

    for (const SCEV *P : S->parameters()) {
      isl_id *Id = S->getIdForParam(P);
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    for (auto &Array : S->arrays()) {
      auto Id = Array->getBasePtrId().release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    isl_ast_expr_free(Zero);

    return Names;
  }

  /// Create a new PPCG scop from the current scop.
  ///
  /// The PPCG scop is initialized with data from the current polly::Scop. From
  /// this initial data, the data-dependences in the PPCG scop are initialized.
  /// We do not use Polly's dependence analysis for now, to ensure we match
  /// the PPCG default behaviour more closely.
  ///
  /// @returns A new ppcg scop.
  ppcg_scop *createPPCGScop() {
    MustKillsInfo KillsInfo = computeMustKillsInfo(*S);

    auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));

    PPCGScop->options = createPPCGOptions();
    // Enable live range reordering.
    PPCGScop->options->live_range_reordering = 1;

    PPCGScop->start = 0;
    PPCGScop->end = 0;

    PPCGScop->context = S->getContext();
    PPCGScop->domain = S->getDomains();
    // TODO: investigate this further. PPCG calls collect_call_domains.
    PPCGScop->call = isl_union_set_from_set(S->getContext());
    PPCGScop->tagged_reads = getTaggedReads();
    PPCGScop->reads = S->getReads();
    PPCGScop->live_in = nullptr;
    PPCGScop->tagged_may_writes = getTaggedMayWrites();
    PPCGScop->may_writes = S->getWrites();
    PPCGScop->tagged_must_writes = getTaggedMustWrites();
    PPCGScop->must_writes = S->getMustWrites();
    PPCGScop->live_out = nullptr;
    PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.take();
    PPCGScop->must_kills = KillsInfo.MustKills.take();

    PPCGScop->tagger = nullptr;
    PPCGScop->independence =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGScop->dep_flow = nullptr;
    PPCGScop->tagged_dep_flow = nullptr;
    PPCGScop->dep_false = nullptr;
    PPCGScop->dep_forced = nullptr;
    PPCGScop->dep_order = nullptr;
    PPCGScop->tagged_dep_order = nullptr;

    PPCGScop->schedule = S->getScheduleTree();
    // If we have something non-trivial to kill, add it to the schedule.
    if (KillsInfo.KillsSchedule.get())
      PPCGScop->schedule = isl_schedule_sequence(
          PPCGScop->schedule, KillsInfo.KillsSchedule.take());

    PPCGScop->names = getNames();
    PPCGScop->pet = nullptr;

    compute_tagger(PPCGScop);
    compute_dependences(PPCGScop);
    eliminate_dead_code(PPCGScop);

    return PPCGScop;
  }

  /// Collect the array accesses in a statement.
  ///
  /// @param Stmt The statement for which to collect the accesses.
  ///
  /// @returns A list of array accesses.
  gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
    gpu_stmt_access *Accesses = nullptr;

    for (MemoryAccess *Acc : Stmt) {
      auto Access = isl_alloc_type(S->getIslCtx(), struct gpu_stmt_access);
      Access->read = Acc->isRead();
      Access->write = Acc->isWrite();
      Access->access = Acc->getAccessRelation().release();
      isl_space *Space = isl_map_get_space(Access->access);
      Space = isl_space_range(Space);
      Space = isl_space_from_range(Space);
      Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
      isl_map *Universe = isl_map_universe(Space);
      Access->tagged_access =
          isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
      Access->exact_write = !Acc->isMayWrite();
      Access->ref_id = Acc->getId().release();
      Access->next = Accesses;
      Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
      Accesses = Access;
    }

    return Accesses;
  }

  /// Collect the list of GPU statements.
  ///
  /// Each statement has an id, a pointer to the underlying data structure,
  /// as well as a list with all memory accesses.
  ///
  /// @returns A linked-list of statements.
  gpu_stmt *getStatements() {
    gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx(), struct gpu_stmt,
                                       std::distance(S->begin(), S->end()));

    int i = 0;
    for (auto &Stmt : *S) {
      gpu_stmt *GPUStmt = &Stmts[i];

      GPUStmt->id = Stmt.getDomainId();

      // We use the pet stmt pointer to keep track of the Polly statements.
      GPUStmt->stmt = (pet_stmt *)&Stmt;
      GPUStmt->accesses = getStmtAccesses(Stmt);
      i++;
    }

    return Stmts;
  }

  /// Derive the extent of an array.
  ///
  /// The extent of an array is the set of elements that are within the
  /// accessed array. For the inner dimensions, the extent constraints are
  /// 0 and the size of the corresponding array dimension. For the first
  /// (outermost) dimension, the extent constraints are the minimal and maximal
  /// subscript value for the first dimension.
  ///
  /// @param Array The array to derive the extent for.
  ///
  /// @returns An isl_set describing the extent of the array.
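  ///
  /// As an illustrative example (not derived from the code below): for
  /// accesses A[i][j] with 0 <= i <= n and 0 <= j < 32 to an array A, the
  /// extent is
  ///
  ///   { A[o0, o1] : 0 <= o0 <= n and 0 <= o1 < 32 }
  ///
  /// where the bounds on o0 are the minimal and maximal accessed subscripts
  /// and the bound on o1 comes from the size of the inner dimension.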
  __isl_give isl_set *getExtent(ScopArrayInfo *Array) {
    unsigned NumDims = Array->getNumberOfDimensions();
    isl_union_map *Accesses = S->getAccesses();
    Accesses = isl_union_map_intersect_domain(Accesses, S->getDomains());
    Accesses = isl_union_map_detect_equalities(Accesses);
    isl_union_set *AccessUSet = isl_union_map_range(Accesses);
    AccessUSet = isl_union_set_coalesce(AccessUSet);
    AccessUSet = isl_union_set_detect_equalities(AccessUSet);
    AccessUSet = isl_union_set_coalesce(AccessUSet);

    if (isl_union_set_is_empty(AccessUSet)) {
      isl_union_set_free(AccessUSet);
      return isl_set_empty(Array->getSpace().release());
    }

    if (Array->getNumberOfDimensions() == 0) {
      isl_union_set_free(AccessUSet);
      return isl_set_universe(Array->getSpace().release());
    }

    isl_set *AccessSet =
        isl_union_set_extract_set(AccessUSet, Array->getSpace().release());

    isl_union_set_free(AccessUSet);
    isl_local_space *LS =
        isl_local_space_from_space(Array->getSpace().release());

    isl_pw_aff *Val =
        isl_pw_aff_from_aff(isl_aff_var_on_domain(LS, isl_dim_set, 0));

    isl_pw_aff *OuterMin = isl_set_dim_min(isl_set_copy(AccessSet), 0);
    isl_pw_aff *OuterMax = isl_set_dim_max(AccessSet, 0);
    OuterMin = isl_pw_aff_add_dims(OuterMin, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMax = isl_pw_aff_add_dims(OuterMax, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMin = isl_pw_aff_set_tuple_id(OuterMin, isl_dim_in,
                                       Array->getBasePtrId().release());
    OuterMax = isl_pw_aff_set_tuple_id(OuterMax, isl_dim_in,
                                       Array->getBasePtrId().release());

    isl_set *Extent = isl_set_universe(Array->getSpace().release());

    Extent = isl_set_intersect(
        Extent, isl_pw_aff_le_set(OuterMin, isl_pw_aff_copy(Val)));
    Extent = isl_set_intersect(Extent, isl_pw_aff_ge_set(OuterMax, Val));

    for (unsigned i = 1; i < NumDims; ++i)
      Extent = isl_set_lower_bound_si(Extent, isl_dim_set, i, 0);

    for (unsigned i = 0; i < NumDims; ++i) {
      isl_pw_aff *PwAff =
          const_cast<isl_pw_aff *>(Array->getDimensionSizePw(i).release());

      // isl_pw_aff can be NULL for zero dimension. Only in the case of a
      // Fortran array will we have a legitimate dimension.
      if (!PwAff) {
        assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
        continue;
      }

      isl_pw_aff *Val = isl_pw_aff_from_aff(isl_aff_var_on_domain(
          isl_local_space_from_space(Array->getSpace().release()), isl_dim_set,
          i));
      PwAff = isl_pw_aff_add_dims(PwAff, isl_dim_in,
                                  isl_pw_aff_dim(Val, isl_dim_in));
      PwAff = isl_pw_aff_set_tuple_id(PwAff, isl_dim_in,
                                      isl_pw_aff_get_tuple_id(Val, isl_dim_in));
      auto *Set = isl_pw_aff_gt_set(PwAff, Val);
      Extent = isl_set_intersect(Set, Extent);
    }

    return Extent;
  }

  /// Derive the bounds of an array.
  ///
  /// For the first dimension we derive the bound of the array from the extent
  /// of this dimension. For inner dimensions we obtain their size directly
  /// from ScopArrayInfo.
  ///
  /// @param PPCGArray The array to compute bounds for.
  /// @param Array The polly array from which to take the information.
  void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
    isl_pw_aff_list *BoundsList =
        isl_pw_aff_list_alloc(S->getIslCtx(), PPCGArray.n_index);
    std::vector<isl::pw_aff> PwAffs;

    isl_space *AlignSpace = S->getParamSpace();
    AlignSpace = isl_space_add_dims(AlignSpace, isl_dim_set, 1);

    if (PPCGArray.n_index > 0) {
      if (isl_set_is_empty(PPCGArray.extent)) {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        isl_local_space *LS = isl_local_space_from_space(
            isl_space_params(isl_set_get_space(Dom)));
        isl_set_free(Dom);
        isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
        Zero = isl_pw_aff_align_params(Zero, isl_space_copy(AlignSpace));
        PwAffs.push_back(isl::manage(isl_pw_aff_copy(Zero)));
        BoundsList = isl_pw_aff_list_insert(BoundsList, 0, Zero);
      } else {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
        isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
        isl_set_free(Dom);
        Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
        isl_local_space *LS =
            isl_local_space_from_space(isl_set_get_space(Dom));
        isl_aff *One = isl_aff_zero_on_domain(LS);
        One = isl_aff_add_constant_si(One, 1);
        Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
        Bound = isl_pw_aff_gist(Bound, S->getContext());
        Bound = isl_pw_aff_align_params(Bound, isl_space_copy(AlignSpace));
        PwAffs.push_back(isl::manage(isl_pw_aff_copy(Bound)));
        BoundsList = isl_pw_aff_list_insert(BoundsList, 0, Bound);
      }
    }

    for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
      isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
      auto LS = isl_pw_aff_get_domain_space(Bound);
      auto Aff = isl_multi_aff_zero(LS);
      Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
      Bound = isl_pw_aff_align_params(Bound, isl_space_copy(AlignSpace));
      PwAffs.push_back(isl::manage(isl_pw_aff_copy(Bound)));
      BoundsList = isl_pw_aff_list_insert(BoundsList, i, Bound);
    }

    isl_space_free(AlignSpace);
    isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);

    assert(BoundsSpace && "Unable to access space of array.");
    assert(BoundsList && "Unable to access list of bounds.");

    PPCGArray.bound =
        isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
    assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
  }

  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg The program to compute the arrays for.
  /// @param ValidSAIs The ScopArrayInfo objects to create array descriptions
  ///                  for.
  void createArrays(gpu_prog *PPCGProg,
                    const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
    int i = 0;
    for (auto &Array : ValidSAIs) {
      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace().release();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = Array->getElementType()->getPrimitiveSizeInBits() / 8;
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.extent = nullptr;
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.extent = getExtent(Array);
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar =
          Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;
      PPCGArray.user = Array;

      PPCGArray.bound = nullptr;
      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
    }
  }

  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace());

    for (auto &Array : S->arrays()) {
      isl_space *Space = Array->getSpace().release();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }

  /// Create a default-initialized PPCG GPU program.
  ///
  /// @returns A new gpu program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    if (!PPCGScop)
      return nullptr;

    auto PPCGProg = isl_calloc_type(S->getIslCtx(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    // TODO: verify that this assignment is correct.
    PPCGProg->any_to_outer = nullptr;

    // This needs to be set when live range reordering is enabled.
    // NOTE: I believe that is conservatively correct. I'm not sure
    // what the semantics of this is.
    // Quoting PPCG/gpu.h: "Order dependences on non-scalars."
    PPCGProg->array_order =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();

    // Only consider arrays that have a non-empty extent.
    // Otherwise, this will cause us to consider the following kinds of
    // empty arrays:
    //  1. Invariant loads that are represented by SAI objects.
    //  2. Arrays with statically known zero size.
    auto ValidSAIsRange =
        make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
          return !isl::manage(getExtent(SAI)).is_empty();
        });
    SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
                                              ValidSAIsRange.end());

    PPCGProg->n_array =
        ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end());
    PPCGProg->array = isl_calloc_array(S->getIslCtx(), struct gpu_array_info,
                                       PPCGProg->n_array);

    createArrays(PPCGProg, ValidSAIs);

    PPCGProg->may_persist = compute_may_persist(PPCGProg);
    return PPCGProg;
  }

  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P The printer to print to
  /// @param Options The printing options to use
  /// @param Node The node to print
  /// @param User A user pointer to carry additional data. This pointer is
  ///             expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);
        isl_id_free(Id);
        isl_ast_print_options_free(Options);
        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }

  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);
  }

  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree An AST describing GPU code
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("# host\n");
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      printf("# kernel%d\n", Kernel->id);
      printKernel(Kernel);
    }
  }

  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute new schedule for the program.
  //  2) Map schedule to GPU.
  //  3) Generate code for new schedule.
  //
  // We do not use the Polly ScheduleOptimizer here, as the schedule optimizer
  // is mostly CPU specific. Instead, we use PPCG's GPU code generation
  // strategy directly from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set scheduling strategy to same strategy PPCG is using.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
    } else {
      Schedule = map_to_device(PPCGGen, Schedule);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      printf("%s\n", isl_printer_get_str(P));
      isl_printer_free(P);
    }

    if (DumpCode) {
      printf("Code\n");
      printf("====\n");
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        printf("No code generated\n");
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }

  /// Free gpu_gen structure.
  ///
  /// @param PPCGGen The gpu_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us. To avoid leaks we do this
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }

  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the number
  /// of points in an isl set through the rectangular hull surrounding this
  /// set.
  ///
  /// @param Set The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

    for (long i = 0; i < isl_set_dim(Set, isl_dim_set); i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }

  /// Approximate the number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt The statement for which to compute the number of dynamic
  ///             instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }
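
  // As an illustrative example (assumed, not taken from a real scop): a block
  // statement with 4 instructions and domain { S[i] : 0 <= i < n } contributes
  // 4 * n dynamic instructions. Because the per-dimension sizes come from a
  // rectangular hull, non-rectangular domains are overapproximated.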

  /// Approximate the number of dynamic instructions executed in the scop.
  ///
  /// @param S The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }

  /// Create a check that ensures sufficient compute in the scop.
  ///
  /// @param S The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }

  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice)) {
        continue;
      }

      for (Value *SrcVal : Inst.operands()) {
        PointerType *p = dyn_cast<PointerType>(SrcVal->getType());
        if (!p)
          continue;
        if (isa<FunctionType>(p->getElementType()))
          return true;
      }
    }
    return false;
  }

  /// Return whether the Scop S uses functions in a way that we do not support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }
    return false;
  }

  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator);

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important, as the
    // conditional branch will guard the original scop from new induction
    // variables that the SCEVExpander may introduce while code generating
    // the parameters, and which may introduce scalar dependences that
    // prevent us from correctly code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM.
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());
    NodeBuilder.addParameters(S->getContext());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);

    // Preload invariant loads. Note: This should happen before the RTC,
    // because the RTC may depend on values that are invariant load hoisted.
    if (!NodeBuilder.preloadInvariantLoads())
      report_fatal_error("preloading invariant loads failed in function: " +
                         S->getFunction().getName() +
                         " | Scop Region: " + S->getNameStr());

    Value *RTC = NodeBuilder.createRTC(Condition);
    Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

    Builder.SetInsertPoint(&*StartBlock->begin());

    NodeBuilder.create(Root);

    // In case a sequential kernel has more surrounding loops than any
    // parallel kernel, the SCoP is probably mostly sequential. Hence, there
    // is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }
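  // Sketch of the control flow created above (assuming the block structure
  // produced by executeScopConditionally; block names are illustrative):
  //
  //   SplitBlock:  br i1 <RTC>, StartBlock, OriginalScop
  //                    |                         |
  //   StartBlock (GPU code from NodeBuilder)   original scop (fallback)
  //                    \                         /
  //                     -----> common exit <-----
  //
  // The branch is first emitted with a constant `true` condition, createRTC
  // substitutes the real run-time check, and the two
  // setOperand(0, getFalse()) calls at the end permanently select the
  // fallback when the GPU path is unprofitable or could not be built.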
  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation would also need to offload the called
    // function to the kernel; otherwise the kernel may end up trying to call
    // a function that only exists on the host. This check also prevents
    // codegen from trying to take the address of an intrinsic function and
    // send it to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      DEBUG(dbgs() << "Scop contains function which cannot be materialised "
                      "in a GPU kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }

  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<ScopDetectionWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<SCEVAAWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    // region tree.
    AU.addPreserved<RegionInfoPass>();
    AU.addPreserved<ScopInfoRegionPass>();
  }
};
} // namespace

char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)
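// Minimal usage sketch (illustrative; assumes the GPUArch/GPURuntime enums
// declared in polly/CodeGen/PPCGCodeGeneration.h). The pass carries its
// target configuration as member state, so it is created through the factory
// above rather than by the pass registry alone:
//
//   llvm::legacy::PassManager PM;
//   PM.add(polly::createPPCGCodeGenerationPass(GPUArch::NVPTX64,
//                                              GPURuntime::CUDA));
//   PM.run(M); // M: the llvm::Module containing the detected scops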