1 //===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Take a scop created by ScopInfo and map it to GPU code using the ppcg 10 // GPU mapping strategy. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "polly/CodeGen/PPCGCodeGeneration.h" 15 #include "polly/CodeGen/CodeGeneration.h" 16 #include "polly/CodeGen/IslAst.h" 17 #include "polly/CodeGen/IslNodeBuilder.h" 18 #include "polly/CodeGen/PerfMonitor.h" 19 #include "polly/CodeGen/Utils.h" 20 #include "polly/DependenceInfo.h" 21 #include "polly/LinkAllPasses.h" 22 #include "polly/Options.h" 23 #include "polly/ScopDetection.h" 24 #include "polly/ScopInfo.h" 25 #include "polly/Support/SCEVValidator.h" 26 #include "llvm/ADT/PostOrderIterator.h" 27 #include "llvm/Analysis/TargetTransformInfo.h" 28 #include "llvm/IR/IntrinsicsNVPTX.h" 29 #include "llvm/IR/LegacyPassManager.h" 30 #include "llvm/IR/Verifier.h" 31 #include "llvm/IRReader/IRReader.h" 32 #include "llvm/InitializePasses.h" 33 #include "llvm/Linker/Linker.h" 34 #include "llvm/Support/SourceMgr.h" 35 #include "llvm/Support/TargetRegistry.h" 36 #include "llvm/Target/TargetMachine.h" 37 #include "llvm/Transforms/IPO/PassManagerBuilder.h" 38 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 39 #include "isl/union_map.h" 40 41 extern "C" { 42 #include "ppcg/cuda.h" 43 #include "ppcg/gpu.h" 44 #include "ppcg/ppcg.h" 45 } 46 47 #include "llvm/Support/Debug.h" 48 49 using namespace polly; 50 using namespace llvm; 51 52 #define DEBUG_TYPE "polly-codegen-ppcg" 53 54 static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule", 55 cl::desc("Dump the computed GPU Schedule"), 56 cl::Hidden, cl::init(false), cl::ZeroOrMore, 57 cl::cat(PollyCategory)); 58 59 static cl::opt<bool> 60 DumpCode("polly-acc-dump-code", 61 cl::desc("Dump C code describing the GPU mapping"), cl::Hidden, 62 cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); 63 64 static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir", 65 cl::desc("Dump the kernel LLVM-IR"), 66 cl::Hidden, cl::init(false), cl::ZeroOrMore, 67 cl::cat(PollyCategory)); 68 69 static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm", 70 cl::desc("Dump the kernel assembly code"), 71 cl::Hidden, cl::init(false), cl::ZeroOrMore, 72 cl::cat(PollyCategory)); 73 74 static cl::opt<bool> FastMath("polly-acc-fastmath", 75 cl::desc("Allow unsafe math optimizations"), 76 cl::Hidden, cl::init(false), cl::ZeroOrMore, 77 cl::cat(PollyCategory)); 78 static cl::opt<bool> SharedMemory("polly-acc-use-shared", 79 cl::desc("Use shared memory"), cl::Hidden, 80 cl::init(false), cl::ZeroOrMore, 81 cl::cat(PollyCategory)); 82 static cl::opt<bool> PrivateMemory("polly-acc-use-private", 83 cl::desc("Use private memory"), cl::Hidden, 84 cl::init(false), cl::ZeroOrMore, 85 cl::cat(PollyCategory)); 86 87 bool polly::PollyManagedMemory; 88 static cl::opt<bool, true> 89 XManagedMemory("polly-acc-codegen-managed-memory", 90 cl::desc("Generate Host kernel code assuming" 91 " that all memory has been" 92 " declared as managed memory"), 93 cl::location(PollyManagedMemory), cl::Hidden, 94 cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); 95 96 static cl::opt<bool> 97 FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure", 98 cl::desc("Fail and generate a backtrace if" 99 " verifyModule fails on the GPU " 100 " kernel module."), 101 cl::Hidden, cl::init(false), cl::ZeroOrMore, 102 cl::cat(PollyCategory)); 103 104 static cl::opt<std::string> CUDALibDevice( 105 "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden, 106 cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"), 107 cl::ZeroOrMore, cl::cat(PollyCategory)); 108 109 static cl::opt<std::string> 110 CudaVersion("polly-acc-cuda-version", 111 cl::desc("The CUDA version to compile for"), cl::Hidden, 112 cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory)); 113 114 static cl::opt<int> 115 MinCompute("polly-acc-mincompute", 116 cl::desc("Minimal number of compute statements to run on GPU."), 117 cl::Hidden, cl::init(10 * 512 * 512)); 118 119 extern bool polly::PerfMonitoring; 120 121 /// Return a unique name for a Scop, which is the scop region with the 122 /// function name. 123 std::string getUniqueScopName(const Scop *S) { 124 return "Scop Region: " + S->getNameStr() + 125 " | Function: " + std::string(S->getFunction().getName()); 126 } 127 128 /// Used to store information PPCG wants for kills. This information is 129 /// used by live range reordering. 130 /// 131 /// @see computeLiveRangeReordering 132 /// @see GPUNodeBuilder::createPPCGScop 133 /// @see GPUNodeBuilder::createPPCGProg 134 struct MustKillsInfo { 135 /// Collection of all kill statements that will be sequenced at the end of 136 /// PPCGScop->schedule. 137 /// 138 /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set` 139 /// which merges schedules in *arbitrary* order. 140 /// (we don't care about the order of the kills anyway). 141 isl::schedule KillsSchedule; 142 /// Map from kill statement instances to scalars that need to be 143 /// killed. 144 /// 145 /// We currently derive kill information for: 146 /// 1. phi nodes. PHI nodes are not alive outside the scop and can 147 /// consequently all be killed. 148 /// 2. Scalar arrays that are not used outside the Scop. This is 149 /// checked by `isScalarUsesContainedInScop`. 150 /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] } 151 isl::union_map TaggedMustKills; 152 153 /// Tagged must kills stripped of the tags. 154 /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] } 155 isl::union_map MustKills; 156 157 MustKillsInfo() : KillsSchedule(nullptr) {} 158 }; 159 160 /// Check if SAI's uses are entirely contained within Scop S. 161 /// If a scalar is used only with a Scop, we are free to kill it, as no data 162 /// can flow in/out of the value any more. 163 /// @see computeMustKillsInfo 164 static bool isScalarUsesContainedInScop(const Scop &S, 165 const ScopArrayInfo *SAI) { 166 assert(SAI->isValueKind() && "this function only deals with scalars." 167 " Dealing with arrays required alias analysis"); 168 169 const Region &R = S.getRegion(); 170 for (User *U : SAI->getBasePtr()->users()) { 171 Instruction *I = dyn_cast<Instruction>(U); 172 assert(I && "invalid user of scop array info"); 173 if (!R.contains(I)) 174 return false; 175 } 176 return true; 177 } 178 179 /// Compute must-kills needed to enable live range reordering with PPCG. 180 /// 181 /// @params S The Scop to compute live range reordering information 182 /// @returns live range reordering information that can be used to setup 183 /// PPCG. 184 static MustKillsInfo computeMustKillsInfo(const Scop &S) { 185 const isl::space ParamSpace = S.getParamSpace(); 186 MustKillsInfo Info; 187 188 // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria: 189 // 1.1 phi nodes in scop. 190 // 1.2 scalars that are only used within the scop 191 SmallVector<isl::id, 4> KillMemIds; 192 for (ScopArrayInfo *SAI : S.arrays()) { 193 if (SAI->isPHIKind() || 194 (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI))) 195 KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release())); 196 } 197 198 Info.TaggedMustKills = isl::union_map::empty(ParamSpace); 199 Info.MustKills = isl::union_map::empty(ParamSpace); 200 201 // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the 202 // schedule: 203 // - filter: "[control] -> { }" 204 // So, we choose to not create this to keep the output a little nicer, 205 // at the cost of some code complexity. 206 Info.KillsSchedule = nullptr; 207 208 for (isl::id &ToKillId : KillMemIds) { 209 isl::id KillStmtId = isl::id::alloc( 210 S.getIslCtx(), 211 std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr); 212 213 // NOTE: construction of tagged_must_kill: 214 // 2. We need to construct a map: 215 // [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] } 216 // To construct this, we use `isl_map_domain_product` on 2 maps`: 217 // 2a. StmtToScalar: 218 // [param] -> { Stmt_phantom[] -> scalar_to_kill[] } 219 // 2b. PhantomRefToScalar: 220 // [param] -> { ref_phantom[] -> scalar_to_kill[] } 221 // 222 // Combining these with `isl_map_domain_product` gives us 223 // TaggedMustKill: 224 // [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] } 225 226 // 2a. [param] -> { Stmt[] -> scalar_to_kill[] } 227 isl::map StmtToScalar = isl::map::universe(ParamSpace); 228 StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId)); 229 StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId)); 230 231 isl::id PhantomRefId = isl::id::alloc( 232 S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(), 233 nullptr); 234 235 // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] } 236 isl::map PhantomRefToScalar = isl::map::universe(ParamSpace); 237 PhantomRefToScalar = 238 PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId); 239 PhantomRefToScalar = 240 PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId); 241 242 // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] } 243 isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar); 244 Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill); 245 246 // 2. [param] -> { Stmt[] -> scalar_to_kill[] } 247 Info.MustKills = Info.TaggedMustKills.domain_factor_domain(); 248 249 // 3. Create the kill schedule of the form: 250 // "[param] -> { Stmt_phantom[] }" 251 // Then add this to Info.KillsSchedule. 252 isl::space KillStmtSpace = ParamSpace; 253 KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId); 254 isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace); 255 256 isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain); 257 if (Info.KillsSchedule) 258 Info.KillsSchedule = isl::manage( 259 isl_schedule_set(Info.KillsSchedule.release(), KillSchedule.copy())); 260 else 261 Info.KillsSchedule = KillSchedule; 262 } 263 264 return Info; 265 } 266 267 /// Create the ast expressions for a ScopStmt. 268 /// 269 /// This function is a callback for to generate the ast expressions for each 270 /// of the scheduled ScopStmts. 271 static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt( 272 void *StmtT, __isl_take isl_ast_build *Build_C, 273 isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA, 274 isl_id *Id, void *User), 275 void *UserIndex, 276 isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User), 277 void *UserExpr) { 278 279 ScopStmt *Stmt = (ScopStmt *)StmtT; 280 281 if (!Stmt || !Build_C) 282 return NULL; 283 284 isl::ast_build Build = isl::manage_copy(Build_C); 285 isl::ctx Ctx = Build.get_ctx(); 286 isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0); 287 288 Stmt->setAstBuild(Build); 289 290 for (MemoryAccess *Acc : *Stmt) { 291 isl::map AddrFunc = Acc->getAddressFunction(); 292 AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain()); 293 294 isl::id RefId = Acc->getId(); 295 isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc); 296 297 isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA); 298 MPA = MPA.coalesce(); 299 MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex)); 300 301 isl::ast_expr Access = Build.access_from(MPA); 302 Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr)); 303 RefToExpr = RefToExpr.set(RefId, Access); 304 } 305 306 return RefToExpr.release(); 307 } 308 309 /// Given a LLVM Type, compute its size in bytes, 310 static int computeSizeInBytes(const Type *T) { 311 int bytes = T->getPrimitiveSizeInBits() / 8; 312 if (bytes == 0) 313 bytes = T->getScalarSizeInBits() / 8; 314 return bytes; 315 } 316 317 /// Generate code for a GPU specific isl AST. 318 /// 319 /// The GPUNodeBuilder augments the general existing IslNodeBuilder, which 320 /// generates code for general-purpose AST nodes, with special functionality 321 /// for generating GPU specific user nodes. 322 /// 323 /// @see GPUNodeBuilder::createUser 324 class GPUNodeBuilder : public IslNodeBuilder { 325 public: 326 GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator, 327 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE, 328 DominatorTree &DT, Scop &S, BasicBlock *StartBlock, 329 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch) 330 : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock), 331 Prog(Prog), Runtime(Runtime), Arch(Arch) { 332 getExprBuilder().setIDToSAI(&IDToSAI); 333 } 334 335 /// Create after-run-time-check initialization code. 336 void initializeAfterRTH(); 337 338 /// Finalize the generated scop. 339 virtual void finalize(); 340 341 /// Track if the full build process was successful. 342 /// 343 /// This value is set to false, if throughout the build process an error 344 /// occurred which prevents us from generating valid GPU code. 345 bool BuildSuccessful = true; 346 347 /// The maximal number of loops surrounding a sequential kernel. 348 unsigned DeepestSequential = 0; 349 350 /// The maximal number of loops surrounding a parallel kernel. 351 unsigned DeepestParallel = 0; 352 353 /// Return the name to set for the ptx_kernel. 354 std::string getKernelFuncName(int Kernel_id); 355 356 private: 357 /// A vector of array base pointers for which a new ScopArrayInfo was created. 358 /// 359 /// This vector is used to delete the ScopArrayInfo when it is not needed any 360 /// more. 361 std::vector<Value *> LocalArrays; 362 363 /// A map from ScopArrays to their corresponding device allocations. 364 std::map<ScopArrayInfo *, Value *> DeviceAllocations; 365 366 /// The current GPU context. 367 Value *GPUContext; 368 369 /// The set of isl_ids allocated in the kernel 370 std::vector<isl_id *> KernelIds; 371 372 /// A module containing GPU code. 373 /// 374 /// This pointer is only set in case we are currently generating GPU code. 375 std::unique_ptr<Module> GPUModule; 376 377 /// The GPU program we generate code for. 378 gpu_prog *Prog; 379 380 /// The GPU Runtime implementation to use (OpenCL or CUDA). 381 GPURuntime Runtime; 382 383 /// The GPU Architecture to target. 384 GPUArch Arch; 385 386 /// Class to free isl_ids. 387 class IslIdDeleter { 388 public: 389 void operator()(__isl_take isl_id *Id) { isl_id_free(Id); }; 390 }; 391 392 /// A set containing all isl_ids allocated in a GPU kernel. 393 /// 394 /// By releasing this set all isl_ids will be freed. 395 std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs; 396 397 IslExprBuilder::IDToScopArrayInfoTy IDToSAI; 398 399 /// Create code for user-defined AST nodes. 400 /// 401 /// These AST nodes can be of type: 402 /// 403 /// - ScopStmt: A computational statement (TODO) 404 /// - Kernel: A GPU kernel call (TODO) 405 /// - Data-Transfer: A GPU <-> CPU data-transfer 406 /// - In-kernel synchronization 407 /// - In-kernel memory copy statement 408 /// 409 /// @param UserStmt The ast node to generate code for. 410 virtual void createUser(__isl_take isl_ast_node *UserStmt); 411 412 virtual void createFor(__isl_take isl_ast_node *Node); 413 414 enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST }; 415 416 /// Create code for a data transfer statement 417 /// 418 /// @param TransferStmt The data transfer statement. 419 /// @param Direction The direction in which to transfer data. 420 void createDataTransfer(__isl_take isl_ast_node *TransferStmt, 421 enum DataDirection Direction); 422 423 /// Find llvm::Values referenced in GPU kernel. 424 /// 425 /// @param Kernel The kernel to scan for llvm::Values 426 /// 427 /// @returns A tuple, whose: 428 /// - First element contains the set of values referenced by the 429 /// kernel 430 /// - Second element contains the set of functions referenced by the 431 /// kernel. All functions in the set satisfy 432 /// `isValidFunctionInKernel`. 433 /// - Third element contains loops that have induction variables 434 /// which are used in the kernel, *and* these loops are *neither* 435 /// in the scop, nor do they immediately surroung the Scop. 436 /// See [Code generation of induction variables of loops outside 437 /// Scops] 438 std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>, 439 isl::space> 440 getReferencesInKernel(ppcg_kernel *Kernel); 441 442 /// Compute the sizes of the execution grid for a given kernel. 443 /// 444 /// @param Kernel The kernel to compute grid sizes for. 445 /// 446 /// @returns A tuple with grid sizes for X and Y dimension 447 std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel); 448 449 /// Get the managed array pointer for sending host pointers to the device. 450 /// \note 451 /// This is to be used only with managed memory 452 Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo); 453 454 /// Compute the sizes of the thread blocks for a given kernel. 455 /// 456 /// @param Kernel The kernel to compute thread block sizes for. 457 /// 458 /// @returns A tuple with thread block sizes for X, Y, and Z dimensions. 459 std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel); 460 461 /// Store a specific kernel launch parameter in the array of kernel launch 462 /// parameters. 463 /// 464 /// @param Parameters The list of parameters in which to store. 465 /// @param Param The kernel launch parameter to store. 466 /// @param Index The index in the parameter list, at which to store the 467 /// parameter. 468 void insertStoreParameter(Instruction *Parameters, Instruction *Param, 469 int Index); 470 471 /// Create kernel launch parameters. 472 /// 473 /// @param Kernel The kernel to create parameters for. 474 /// @param F The kernel function that has been created. 475 /// @param SubtreeValues The set of llvm::Values referenced by this kernel. 476 /// 477 /// @returns A stack allocated array with pointers to the parameter 478 /// values that are passed to the kernel. 479 Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F, 480 SetVector<Value *> SubtreeValues); 481 482 /// Create declarations for kernel variable. 483 /// 484 /// This includes shared memory declarations. 485 /// 486 /// @param Kernel The kernel definition to create variables for. 487 /// @param FN The function into which to generate the variables. 488 void createKernelVariables(ppcg_kernel *Kernel, Function *FN); 489 490 /// Add CUDA annotations to module. 491 /// 492 /// Add a set of CUDA annotations that declares the maximal block dimensions 493 /// that will be used to execute the CUDA kernel. This allows the NVIDIA 494 /// PTX compiler to bound the number of allocated registers to ensure the 495 /// resulting kernel is known to run with up to as many block dimensions 496 /// as specified here. 497 /// 498 /// @param M The module to add the annotations to. 499 /// @param BlockDimX The size of block dimension X. 500 /// @param BlockDimY The size of block dimension Y. 501 /// @param BlockDimZ The size of block dimension Z. 502 void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY, 503 Value *BlockDimZ); 504 505 /// Create GPU kernel. 506 /// 507 /// Code generate the kernel described by @p KernelStmt. 508 /// 509 /// @param KernelStmt The ast node to generate kernel code for. 510 void createKernel(__isl_take isl_ast_node *KernelStmt); 511 512 /// Generate code that computes the size of an array. 513 /// 514 /// @param Array The array for which to compute a size. 515 Value *getArraySize(gpu_array_info *Array); 516 517 /// Generate code to compute the minimal offset at which an array is accessed. 518 /// 519 /// The offset of an array is the minimal array location accessed in a scop. 520 /// 521 /// Example: 522 /// 523 /// for (long i = 0; i < 100; i++) 524 /// A[i + 42] += ... 525 /// 526 /// getArrayOffset(A) results in 42. 527 /// 528 /// @param Array The array for which to compute the offset. 529 /// @returns An llvm::Value that contains the offset of the array. 530 Value *getArrayOffset(gpu_array_info *Array); 531 532 /// Prepare the kernel arguments for kernel code generation 533 /// 534 /// @param Kernel The kernel to generate code for. 535 /// @param FN The function created for the kernel. 536 void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN); 537 538 /// Create kernel function. 539 /// 540 /// Create a kernel function located in a newly created module that can serve 541 /// as target for device code generation. Set the Builder to point to the 542 /// start block of this newly created function. 543 /// 544 /// @param Kernel The kernel to generate code for. 545 /// @param SubtreeValues The set of llvm::Values referenced by this kernel. 546 /// @param SubtreeFunctions The set of llvm::Functions referenced by this 547 /// kernel. 548 void createKernelFunction(ppcg_kernel *Kernel, 549 SetVector<Value *> &SubtreeValues, 550 SetVector<Function *> &SubtreeFunctions); 551 552 /// Create the declaration of a kernel function. 553 /// 554 /// The kernel function takes as arguments: 555 /// 556 /// - One i8 pointer for each external array reference used in the kernel. 557 /// - Host iterators 558 /// - Parameters 559 /// - Other LLVM Value references (TODO) 560 /// 561 /// @param Kernel The kernel to generate the function declaration for. 562 /// @param SubtreeValues The set of llvm::Values referenced by this kernel. 563 /// 564 /// @returns The newly declared function. 565 Function *createKernelFunctionDecl(ppcg_kernel *Kernel, 566 SetVector<Value *> &SubtreeValues); 567 568 /// Insert intrinsic functions to obtain thread and block ids. 569 /// 570 /// @param The kernel to generate the intrinsic functions for. 571 void insertKernelIntrinsics(ppcg_kernel *Kernel); 572 573 /// Insert function calls to retrieve the SPIR group/local ids. 574 /// 575 /// @param Kernel The kernel to generate the function calls for. 576 /// @param SizeTypeIs64Bit Whether size_t of the openCl device is 64bit. 577 void insertKernelCallsSPIR(ppcg_kernel *Kernel, bool SizeTypeIs64bit); 578 579 /// Setup the creation of functions referenced by the GPU kernel. 580 /// 581 /// 1. Create new function declarations in GPUModule which are the same as 582 /// SubtreeFunctions. 583 /// 584 /// 2. Populate IslNodeBuilder::ValueMap with mappings from 585 /// old functions (that come from the original module) to new functions 586 /// (that are created within GPUModule). That way, we generate references 587 /// to the correct function (in GPUModule) in BlockGenerator. 588 /// 589 /// @see IslNodeBuilder::ValueMap 590 /// @see BlockGenerator::GlobalMap 591 /// @see BlockGenerator::getNewValue 592 /// @see GPUNodeBuilder::getReferencesInKernel. 593 /// 594 /// @param SubtreeFunctions The set of llvm::Functions referenced by 595 /// this kernel. 596 void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions); 597 598 /// Create a global-to-shared or shared-to-global copy statement. 599 /// 600 /// @param CopyStmt The copy statement to generate code for 601 void createKernelCopy(ppcg_kernel_stmt *CopyStmt); 602 603 /// Create code for a ScopStmt called in @p Expr. 604 /// 605 /// @param Expr The expression containing the call. 606 /// @param KernelStmt The kernel statement referenced in the call. 607 void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt); 608 609 /// Create an in-kernel synchronization call. 610 void createKernelSync(); 611 612 /// Create a PTX assembly string for the current GPU kernel. 613 /// 614 /// @returns A string containing the corresponding PTX assembly code. 615 std::string createKernelASM(); 616 617 /// Remove references from the dominator tree to the kernel function @p F. 618 /// 619 /// @param F The function to remove references to. 620 void clearDominators(Function *F); 621 622 /// Remove references from scalar evolution to the kernel function @p F. 623 /// 624 /// @param F The function to remove references to. 625 void clearScalarEvolution(Function *F); 626 627 /// Remove references from loop info to the kernel function @p F. 628 /// 629 /// @param F The function to remove references to. 630 void clearLoops(Function *F); 631 632 /// Check if the scop requires to be linked with CUDA's libdevice. 633 bool requiresCUDALibDevice(); 634 635 /// Link with the NVIDIA libdevice library (if needed and available). 636 void addCUDALibDevice(); 637 638 /// Finalize the generation of the kernel function. 639 /// 640 /// Free the LLVM-IR module corresponding to the kernel and -- if requested -- 641 /// dump its IR to stderr. 642 /// 643 /// @returns The Assembly string of the kernel. 644 std::string finalizeKernelFunction(); 645 646 /// Finalize the generation of the kernel arguments. 647 /// 648 /// This function ensures that not-read-only scalars used in a kernel are 649 /// stored back to the global memory location they are backed with before 650 /// the kernel terminates. 651 /// 652 /// @params Kernel The kernel to finalize kernel arguments for. 653 void finalizeKernelArguments(ppcg_kernel *Kernel); 654 655 /// Create code that allocates memory to store arrays on device. 656 void allocateDeviceArrays(); 657 658 /// Create code to prepare the managed device pointers. 659 void prepareManagedDeviceArrays(); 660 661 /// Free all allocated device arrays. 662 void freeDeviceArrays(); 663 664 /// Create a call to initialize the GPU context. 665 /// 666 /// @returns A pointer to the newly initialized context. 667 Value *createCallInitContext(); 668 669 /// Create a call to get the device pointer for a kernel allocation. 670 /// 671 /// @param Allocation The Polly GPU allocation 672 /// 673 /// @returns The device parameter corresponding to this allocation. 674 Value *createCallGetDevicePtr(Value *Allocation); 675 676 /// Create a call to free the GPU context. 677 /// 678 /// @param Context A pointer to an initialized GPU context. 679 void createCallFreeContext(Value *Context); 680 681 /// Create a call to allocate memory on the device. 682 /// 683 /// @param Size The size of memory to allocate 684 /// 685 /// @returns A pointer that identifies this allocation. 686 Value *createCallAllocateMemoryForDevice(Value *Size); 687 688 /// Create a call to free a device array. 689 /// 690 /// @param Array The device array to free. 691 void createCallFreeDeviceMemory(Value *Array); 692 693 /// Create a call to copy data from host to device. 694 /// 695 /// @param HostPtr A pointer to the host data that should be copied. 696 /// @param DevicePtr A device pointer specifying the location to copy to. 697 void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr, 698 Value *Size); 699 700 /// Create a call to copy data from device to host. 701 /// 702 /// @param DevicePtr A pointer to the device data that should be copied. 703 /// @param HostPtr A host pointer specifying the location to copy to. 704 void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr, 705 Value *Size); 706 707 /// Create a call to synchronize Host & Device. 708 /// \note 709 /// This is to be used only with managed memory. 710 void createCallSynchronizeDevice(); 711 712 /// Create a call to get a kernel from an assembly string. 713 /// 714 /// @param Buffer The string describing the kernel. 715 /// @param Entry The name of the kernel function to call. 716 /// 717 /// @returns A pointer to a kernel object 718 Value *createCallGetKernel(Value *Buffer, Value *Entry); 719 720 /// Create a call to free a GPU kernel. 721 /// 722 /// @param GPUKernel THe kernel to free. 723 void createCallFreeKernel(Value *GPUKernel); 724 725 /// Create a call to launch a GPU kernel. 726 /// 727 /// @param GPUKernel The kernel to launch. 728 /// @param GridDimX The size of the first grid dimension. 729 /// @param GridDimY The size of the second grid dimension. 730 /// @param GridBlockX The size of the first block dimension. 731 /// @param GridBlockY The size of the second block dimension. 732 /// @param GridBlockZ The size of the third block dimension. 733 /// @param Parameters A pointer to an array that contains itself pointers to 734 /// the parameter values passed for each kernel argument. 735 void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX, 736 Value *GridDimY, Value *BlockDimX, 737 Value *BlockDimY, Value *BlockDimZ, 738 Value *Parameters); 739 }; 740 741 std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) { 742 return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" + 743 std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id); 744 } 745 746 void GPUNodeBuilder::initializeAfterRTH() { 747 BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(), 748 &*Builder.GetInsertPoint(), &DT, &LI); 749 NewBB->setName("polly.acc.initialize"); 750 Builder.SetInsertPoint(&NewBB->front()); 751 752 GPUContext = createCallInitContext(); 753 754 if (!PollyManagedMemory) 755 allocateDeviceArrays(); 756 else 757 prepareManagedDeviceArrays(); 758 } 759 760 void GPUNodeBuilder::finalize() { 761 if (!PollyManagedMemory) 762 freeDeviceArrays(); 763 764 createCallFreeContext(GPUContext); 765 IslNodeBuilder::finalize(); 766 } 767 768 void GPUNodeBuilder::allocateDeviceArrays() { 769 assert(!PollyManagedMemory && 770 "Managed memory will directly send host pointers " 771 "to the kernel. There is no need for device arrays"); 772 isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release()); 773 774 for (int i = 0; i < Prog->n_array; ++i) { 775 gpu_array_info *Array = &Prog->array[i]; 776 auto *ScopArray = (ScopArrayInfo *)Array->user; 777 std::string DevArrayName("p_dev_array_"); 778 DevArrayName.append(Array->name); 779 780 Value *ArraySize = getArraySize(Array); 781 Value *Offset = getArrayOffset(Array); 782 if (Offset) 783 ArraySize = Builder.CreateSub( 784 ArraySize, 785 Builder.CreateMul(Offset, 786 Builder.getInt64(ScopArray->getElemSizeInBytes()))); 787 const SCEV *SizeSCEV = SE.getSCEV(ArraySize); 788 // It makes no sense to have an array of size 0. The CUDA API will 789 // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We 790 // choose to be defensive and catch this at the compile phase. It is 791 // most likely that we are doing something wrong with size computation. 792 if (SizeSCEV->isZero()) { 793 errs() << getUniqueScopName(&S) 794 << " has computed array size 0: " << *ArraySize 795 << " | for array: " << *(ScopArray->getBasePtr()) 796 << ". This is illegal, exiting.\n"; 797 report_fatal_error("array size was computed to be 0"); 798 } 799 800 Value *DevArray = createCallAllocateMemoryForDevice(ArraySize); 801 DevArray->setName(DevArrayName); 802 DeviceAllocations[ScopArray] = DevArray; 803 } 804 805 isl_ast_build_free(Build); 806 } 807 808 void GPUNodeBuilder::prepareManagedDeviceArrays() { 809 assert(PollyManagedMemory && 810 "Device array most only be prepared in managed-memory mode"); 811 for (int i = 0; i < Prog->n_array; ++i) { 812 gpu_array_info *Array = &Prog->array[i]; 813 ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user; 814 Value *HostPtr; 815 816 if (gpu_array_is_scalar(Array)) 817 HostPtr = BlockGen.getOrCreateAlloca(ScopArray); 818 else 819 HostPtr = ScopArray->getBasePtr(); 820 HostPtr = getLatestValue(HostPtr); 821 822 Value *Offset = getArrayOffset(Array); 823 if (Offset) { 824 HostPtr = Builder.CreatePointerCast( 825 HostPtr, ScopArray->getElementType()->getPointerTo()); 826 HostPtr = Builder.CreateGEP(HostPtr, Offset); 827 } 828 829 HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy()); 830 DeviceAllocations[ScopArray] = HostPtr; 831 } 832 } 833 834 void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX, 835 Value *BlockDimY, Value *BlockDimZ) { 836 auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations"); 837 838 for (auto &F : *M) { 839 if (F.getCallingConv() != CallingConv::PTX_Kernel) 840 continue; 841 842 Value *V[] = {BlockDimX, BlockDimY, BlockDimZ}; 843 844 Metadata *Elements[] = { 845 ValueAsMetadata::get(&F), MDString::get(M->getContext(), "maxntidx"), 846 ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"), 847 ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"), 848 ValueAsMetadata::get(V[2]), 849 }; 850 MDNode *Node = MDNode::get(M->getContext(), Elements); 851 AnnotationNode->addOperand(Node); 852 } 853 } 854 855 void GPUNodeBuilder::freeDeviceArrays() { 856 assert(!PollyManagedMemory && "Managed memory does not use device arrays"); 857 for (auto &Array : DeviceAllocations) 858 createCallFreeDeviceMemory(Array.second); 859 } 860 861 Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) { 862 const char *Name = "polly_getKernel"; 863 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 864 Function *F = M->getFunction(Name); 865 866 // If F is not available, declare it. 867 if (!F) { 868 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 869 std::vector<Type *> Args; 870 Args.push_back(Builder.getInt8PtrTy()); 871 Args.push_back(Builder.getInt8PtrTy()); 872 FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false); 873 F = Function::Create(Ty, Linkage, Name, M); 874 } 875 876 return Builder.CreateCall(F, {Buffer, Entry}); 877 } 878 879 Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) { 880 const char *Name = "polly_getDevicePtr"; 881 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 882 Function *F = M->getFunction(Name); 883 884 // If F is not available, declare it. 885 if (!F) { 886 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 887 std::vector<Type *> Args; 888 Args.push_back(Builder.getInt8PtrTy()); 889 FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false); 890 F = Function::Create(Ty, Linkage, Name, M); 891 } 892 893 return Builder.CreateCall(F, {Allocation}); 894 } 895 896 void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX, 897 Value *GridDimY, Value *BlockDimX, 898 Value *BlockDimY, Value *BlockDimZ, 899 Value *Parameters) { 900 const char *Name = "polly_launchKernel"; 901 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 902 Function *F = M->getFunction(Name); 903 904 // If F is not available, declare it. 905 if (!F) { 906 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 907 std::vector<Type *> Args; 908 Args.push_back(Builder.getInt8PtrTy()); 909 Args.push_back(Builder.getInt32Ty()); 910 Args.push_back(Builder.getInt32Ty()); 911 Args.push_back(Builder.getInt32Ty()); 912 Args.push_back(Builder.getInt32Ty()); 913 Args.push_back(Builder.getInt32Ty()); 914 Args.push_back(Builder.getInt8PtrTy()); 915 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false); 916 F = Function::Create(Ty, Linkage, Name, M); 917 } 918 919 Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY, 920 BlockDimZ, Parameters}); 921 } 922 923 void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) { 924 const char *Name = "polly_freeKernel"; 925 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 926 Function *F = M->getFunction(Name); 927 928 // If F is not available, declare it. 929 if (!F) { 930 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 931 std::vector<Type *> Args; 932 Args.push_back(Builder.getInt8PtrTy()); 933 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false); 934 F = Function::Create(Ty, Linkage, Name, M); 935 } 936 937 Builder.CreateCall(F, {GPUKernel}); 938 } 939 940 void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) { 941 assert(!PollyManagedMemory && 942 "Managed memory does not allocate or free memory " 943 "for device"); 944 const char *Name = "polly_freeDeviceMemory"; 945 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 946 Function *F = M->getFunction(Name); 947 948 // If F is not available, declare it. 949 if (!F) { 950 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 951 std::vector<Type *> Args; 952 Args.push_back(Builder.getInt8PtrTy()); 953 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false); 954 F = Function::Create(Ty, Linkage, Name, M); 955 } 956 957 Builder.CreateCall(F, {Array}); 958 } 959 960 Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) { 961 assert(!PollyManagedMemory && 962 "Managed memory does not allocate or free memory " 963 "for device"); 964 const char *Name = "polly_allocateMemoryForDevice"; 965 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 966 Function *F = M->getFunction(Name); 967 968 // If F is not available, declare it. 969 if (!F) { 970 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 971 std::vector<Type *> Args; 972 Args.push_back(Builder.getInt64Ty()); 973 FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false); 974 F = Function::Create(Ty, Linkage, Name, M); 975 } 976 977 return Builder.CreateCall(F, {Size}); 978 } 979 980 void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData, 981 Value *DeviceData, 982 Value *Size) { 983 assert(!PollyManagedMemory && 984 "Managed memory does not transfer memory between " 985 "device and host"); 986 const char *Name = "polly_copyFromHostToDevice"; 987 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 988 Function *F = M->getFunction(Name); 989 990 // If F is not available, declare it. 991 if (!F) { 992 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 993 std::vector<Type *> Args; 994 Args.push_back(Builder.getInt8PtrTy()); 995 Args.push_back(Builder.getInt8PtrTy()); 996 Args.push_back(Builder.getInt64Ty()); 997 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false); 998 F = Function::Create(Ty, Linkage, Name, M); 999 } 1000 1001 Builder.CreateCall(F, {HostData, DeviceData, Size}); 1002 } 1003 1004 void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData, 1005 Value *HostData, 1006 Value *Size) { 1007 assert(!PollyManagedMemory && 1008 "Managed memory does not transfer memory between " 1009 "device and host"); 1010 const char *Name = "polly_copyFromDeviceToHost"; 1011 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 1012 Function *F = M->getFunction(Name); 1013 1014 // If F is not available, declare it. 1015 if (!F) { 1016 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 1017 std::vector<Type *> Args; 1018 Args.push_back(Builder.getInt8PtrTy()); 1019 Args.push_back(Builder.getInt8PtrTy()); 1020 Args.push_back(Builder.getInt64Ty()); 1021 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false); 1022 F = Function::Create(Ty, Linkage, Name, M); 1023 } 1024 1025 Builder.CreateCall(F, {DeviceData, HostData, Size}); 1026 } 1027 1028 void GPUNodeBuilder::createCallSynchronizeDevice() { 1029 assert(PollyManagedMemory && "explicit synchronization is only necessary for " 1030 "managed memory"); 1031 const char *Name = "polly_synchronizeDevice"; 1032 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 1033 Function *F = M->getFunction(Name); 1034 1035 // If F is not available, declare it. 1036 if (!F) { 1037 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 1038 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false); 1039 F = Function::Create(Ty, Linkage, Name, M); 1040 } 1041 1042 Builder.CreateCall(F); 1043 } 1044 1045 Value *GPUNodeBuilder::createCallInitContext() { 1046 const char *Name; 1047 1048 switch (Runtime) { 1049 case GPURuntime::CUDA: 1050 Name = "polly_initContextCUDA"; 1051 break; 1052 case GPURuntime::OpenCL: 1053 Name = "polly_initContextCL"; 1054 break; 1055 } 1056 1057 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 1058 Function *F = M->getFunction(Name); 1059 1060 // If F is not available, declare it. 1061 if (!F) { 1062 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 1063 std::vector<Type *> Args; 1064 FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false); 1065 F = Function::Create(Ty, Linkage, Name, M); 1066 } 1067 1068 return Builder.CreateCall(F, {}); 1069 } 1070 1071 void GPUNodeBuilder::createCallFreeContext(Value *Context) { 1072 const char *Name = "polly_freeContext"; 1073 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 1074 Function *F = M->getFunction(Name); 1075 1076 // If F is not available, declare it. 1077 if (!F) { 1078 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 1079 std::vector<Type *> Args; 1080 Args.push_back(Builder.getInt8PtrTy()); 1081 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false); 1082 F = Function::Create(Ty, Linkage, Name, M); 1083 } 1084 1085 Builder.CreateCall(F, {Context}); 1086 } 1087 1088 /// Check if one string is a prefix of another. 1089 /// 1090 /// @param String The string in which to look for the prefix. 1091 /// @param Prefix The prefix to look for. 1092 static bool isPrefix(std::string String, std::string Prefix) { 1093 return String.find(Prefix) == 0; 1094 } 1095 1096 Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) { 1097 isl::ast_build Build = isl::ast_build::from_context(S.getContext()); 1098 Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size); 1099 1100 if (!gpu_array_is_scalar(Array)) { 1101 isl::multi_pw_aff ArrayBound = isl::manage_copy(Array->bound); 1102 1103 isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0); 1104 isl::ast_expr Res = Build.expr_from(OffsetDimZero); 1105 1106 for (unsigned int i = 1; i < Array->n_index; i++) { 1107 isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i); 1108 isl::ast_expr Expr = Build.expr_from(Bound_I); 1109 Res = Res.mul(Expr); 1110 } 1111 1112 Value *NumElements = ExprBuilder.create(Res.release()); 1113 if (NumElements->getType() != ArraySize->getType()) 1114 NumElements = Builder.CreateSExt(NumElements, ArraySize->getType()); 1115 ArraySize = Builder.CreateMul(ArraySize, NumElements); 1116 } 1117 return ArraySize; 1118 } 1119 1120 Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) { 1121 if (gpu_array_is_scalar(Array)) 1122 return nullptr; 1123 1124 isl::ast_build Build = isl::ast_build::from_context(S.getContext()); 1125 1126 isl::set Min = isl::manage_copy(Array->extent).lexmin(); 1127 1128 isl::set ZeroSet = isl::set::universe(Min.get_space()); 1129 1130 for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++) 1131 ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0); 1132 1133 if (Min.is_subset(ZeroSet)) { 1134 return nullptr; 1135 } 1136 1137 isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.get_ctx(), 0)); 1138 1139 for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++) { 1140 if (i > 0) { 1141 isl::pw_aff Bound_I = 1142 isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1)); 1143 isl::ast_expr BExpr = Build.expr_from(Bound_I); 1144 Result = Result.mul(BExpr); 1145 } 1146 isl::pw_aff DimMin = Min.dim_min(i); 1147 isl::ast_expr MExpr = Build.expr_from(DimMin); 1148 Result = Result.add(MExpr); 1149 } 1150 1151 return ExprBuilder.create(Result.release()); 1152 } 1153 1154 Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array, 1155 ScopArrayInfo *ArrayInfo) { 1156 assert(PollyManagedMemory && "Only used when you wish to get a host " 1157 "pointer for sending data to the kernel, " 1158 "with managed memory"); 1159 std::map<ScopArrayInfo *, Value *>::iterator it; 1160 it = DeviceAllocations.find(ArrayInfo); 1161 assert(it != DeviceAllocations.end() && 1162 "Device array expected to be available"); 1163 return it->second; 1164 } 1165 1166 void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt, 1167 enum DataDirection Direction) { 1168 assert(!PollyManagedMemory && "Managed memory needs no data transfers"); 1169 isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt); 1170 isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0); 1171 isl_id *Id = isl_ast_expr_get_id(Arg); 1172 auto Array = (gpu_array_info *)isl_id_get_user(Id); 1173 auto ScopArray = (ScopArrayInfo *)(Array->user); 1174 1175 Value *Size = getArraySize(Array); 1176 Value *Offset = getArrayOffset(Array); 1177 Value *DevPtr = DeviceAllocations[ScopArray]; 1178 1179 Value *HostPtr; 1180 1181 if (gpu_array_is_scalar(Array)) 1182 HostPtr = BlockGen.getOrCreateAlloca(ScopArray); 1183 else 1184 HostPtr = ScopArray->getBasePtr(); 1185 HostPtr = getLatestValue(HostPtr); 1186 1187 if (Offset) { 1188 HostPtr = Builder.CreatePointerCast( 1189 HostPtr, ScopArray->getElementType()->getPointerTo()); 1190 HostPtr = Builder.CreateGEP(HostPtr, Offset); 1191 } 1192 1193 HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy()); 1194 1195 if (Offset) { 1196 Size = Builder.CreateSub( 1197 Size, Builder.CreateMul( 1198 Offset, Builder.getInt64(ScopArray->getElemSizeInBytes()))); 1199 } 1200 1201 if (Direction == HOST_TO_DEVICE) 1202 createCallCopyFromHostToDevice(HostPtr, DevPtr, Size); 1203 else 1204 createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size); 1205 1206 isl_id_free(Id); 1207 isl_ast_expr_free(Arg); 1208 isl_ast_expr_free(Expr); 1209 isl_ast_node_free(TransferStmt); 1210 } 1211 1212 void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) { 1213 isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt); 1214 isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0); 1215 isl_id *Id = isl_ast_expr_get_id(StmtExpr); 1216 isl_id_free(Id); 1217 isl_ast_expr_free(StmtExpr); 1218 1219 const char *Str = isl_id_get_name(Id); 1220 if (!strcmp(Str, "kernel")) { 1221 createKernel(UserStmt); 1222 if (PollyManagedMemory) 1223 createCallSynchronizeDevice(); 1224 isl_ast_expr_free(Expr); 1225 return; 1226 } 1227 if (!strcmp(Str, "init_device")) { 1228 initializeAfterRTH(); 1229 isl_ast_node_free(UserStmt); 1230 isl_ast_expr_free(Expr); 1231 return; 1232 } 1233 if (!strcmp(Str, "clear_device")) { 1234 finalize(); 1235 isl_ast_node_free(UserStmt); 1236 isl_ast_expr_free(Expr); 1237 return; 1238 } 1239 if (isPrefix(Str, "to_device")) { 1240 if (!PollyManagedMemory) 1241 createDataTransfer(UserStmt, HOST_TO_DEVICE); 1242 else 1243 isl_ast_node_free(UserStmt); 1244 1245 isl_ast_expr_free(Expr); 1246 return; 1247 } 1248 1249 if (isPrefix(Str, "from_device")) { 1250 if (!PollyManagedMemory) { 1251 createDataTransfer(UserStmt, DEVICE_TO_HOST); 1252 } else { 1253 isl_ast_node_free(UserStmt); 1254 } 1255 isl_ast_expr_free(Expr); 1256 return; 1257 } 1258 1259 isl_id *Anno = isl_ast_node_get_annotation(UserStmt); 1260 struct ppcg_kernel_stmt *KernelStmt = 1261 (struct ppcg_kernel_stmt *)isl_id_get_user(Anno); 1262 isl_id_free(Anno); 1263 1264 switch (KernelStmt->type) { 1265 case ppcg_kernel_domain: 1266 createScopStmt(Expr, KernelStmt); 1267 isl_ast_node_free(UserStmt); 1268 return; 1269 case ppcg_kernel_copy: 1270 createKernelCopy(KernelStmt); 1271 isl_ast_expr_free(Expr); 1272 isl_ast_node_free(UserStmt); 1273 return; 1274 case ppcg_kernel_sync: 1275 createKernelSync(); 1276 isl_ast_expr_free(Expr); 1277 isl_ast_node_free(UserStmt); 1278 return; 1279 } 1280 1281 isl_ast_expr_free(Expr); 1282 isl_ast_node_free(UserStmt); 1283 } 1284 1285 void GPUNodeBuilder::createFor(__isl_take isl_ast_node *Node) { 1286 createForSequential(isl::manage(Node), false); 1287 } 1288 1289 void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) { 1290 isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index); 1291 LocalIndex = isl_ast_expr_address_of(LocalIndex); 1292 Value *LocalAddr = ExprBuilder.create(LocalIndex); 1293 isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index); 1294 Index = isl_ast_expr_address_of(Index); 1295 Value *GlobalAddr = ExprBuilder.create(Index); 1296 1297 if (KernelStmt->u.c.read) { 1298 LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read"); 1299 Builder.CreateStore(Load, LocalAddr); 1300 } else { 1301 LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write"); 1302 Builder.CreateStore(Load, GlobalAddr); 1303 } 1304 } 1305 1306 void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr, 1307 ppcg_kernel_stmt *KernelStmt) { 1308 auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt; 1309 isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr; 1310 1311 LoopToScevMapT LTS; 1312 LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end()); 1313 1314 createSubstitutions(Expr, Stmt, LTS); 1315 1316 if (Stmt->isBlockStmt()) 1317 BlockGen.copyStmt(*Stmt, LTS, Indexes); 1318 else 1319 RegionGen.copyStmt(*Stmt, LTS, Indexes); 1320 } 1321 1322 void GPUNodeBuilder::createKernelSync() { 1323 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 1324 const char *SpirName = "__gen_ocl_barrier_global"; 1325 1326 Function *Sync; 1327 1328 switch (Arch) { 1329 case GPUArch::SPIR64: 1330 case GPUArch::SPIR32: 1331 Sync = M->getFunction(SpirName); 1332 1333 // If Sync is not available, declare it. 1334 if (!Sync) { 1335 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 1336 std::vector<Type *> Args; 1337 FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false); 1338 Sync = Function::Create(Ty, Linkage, SpirName, M); 1339 Sync->setCallingConv(CallingConv::SPIR_FUNC); 1340 } 1341 break; 1342 case GPUArch::NVPTX64: 1343 Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0); 1344 break; 1345 } 1346 1347 Builder.CreateCall(Sync, {}); 1348 } 1349 1350 /// Collect llvm::Values referenced from @p Node 1351 /// 1352 /// This function only applies to isl_ast_nodes that are user_nodes referring 1353 /// to a ScopStmt. All other node types are ignore. 1354 /// 1355 /// @param Node The node to collect references for. 1356 /// @param User A user pointer used as storage for the data that is collected. 1357 /// 1358 /// @returns isl_bool_true if data could be collected successfully. 1359 isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) { 1360 if (isl_ast_node_get_type(Node) != isl_ast_node_user) 1361 return isl_bool_true; 1362 1363 isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node); 1364 isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0); 1365 isl_id *Id = isl_ast_expr_get_id(StmtExpr); 1366 const char *Str = isl_id_get_name(Id); 1367 isl_id_free(Id); 1368 isl_ast_expr_free(StmtExpr); 1369 isl_ast_expr_free(Expr); 1370 1371 if (!isPrefix(Str, "Stmt")) 1372 return isl_bool_true; 1373 1374 Id = isl_ast_node_get_annotation(Node); 1375 auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id); 1376 auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt; 1377 isl_id_free(Id); 1378 1379 addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */); 1380 1381 return isl_bool_true; 1382 } 1383 1384 /// A list of functions that are available in NVIDIA's libdevice. 1385 const std::set<std::string> CUDALibDeviceFunctions = { 1386 "exp", "expf", "expl", "cos", "cosf", "sqrt", "sqrtf", 1387 "copysign", "copysignf", "copysignl", "log", "logf", "powi", "powif"}; 1388 1389 // A map from intrinsics to their corresponding libdevice functions. 1390 const std::map<std::string, std::string> IntrinsicToLibdeviceFunc = { 1391 {"llvm.exp.f64", "exp"}, 1392 {"llvm.exp.f32", "expf"}, 1393 {"llvm.powi.f64", "powi"}, 1394 {"llvm.powi.f32", "powif"}}; 1395 1396 /// Return the corresponding CUDA libdevice function name @p Name. 1397 /// Note that this function will try to convert instrinsics in the list 1398 /// IntrinsicToLibdeviceFunc into libdevice functions. 1399 /// This is because some intrinsics such as `exp` 1400 /// are not supported by the NVPTX backend. 1401 /// If this restriction of the backend is lifted, we should refactor our code 1402 /// so that we use intrinsics whenever possible. 1403 /// 1404 /// Return "" if we are not compiling for CUDA. 1405 std::string getCUDALibDeviceFuntion(StringRef Name) { 1406 auto It = IntrinsicToLibdeviceFunc.find(Name); 1407 if (It != IntrinsicToLibdeviceFunc.end()) 1408 return getCUDALibDeviceFuntion(It->second); 1409 1410 if (CUDALibDeviceFunctions.count(Name)) 1411 return ("__nv_" + Name).str(); 1412 1413 return ""; 1414 } 1415 1416 /// Check if F is a function that we can code-generate in a GPU kernel. 1417 static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) { 1418 assert(F && "F is an invalid pointer"); 1419 // We string compare against the name of the function to allow 1420 // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and 1421 // "llvm.copysign". 1422 const StringRef Name = F->getName(); 1423 1424 if (AllowLibDevice && getCUDALibDeviceFuntion(Name).length() > 0) 1425 return true; 1426 1427 return F->isIntrinsic() && 1428 (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") || 1429 Name.startswith("llvm.copysign")); 1430 } 1431 1432 /// Do not take `Function` as a subtree value. 1433 /// 1434 /// We try to take the reference of all subtree values and pass them along 1435 /// to the kernel from the host. Taking an address of any function and 1436 /// trying to pass along is nonsensical. Only allow `Value`s that are not 1437 /// `Function`s. 1438 static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); } 1439 1440 /// Return `Function`s from `RawSubtreeValues`. 1441 static SetVector<Function *> 1442 getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues, 1443 bool AllowCUDALibDevice) { 1444 SetVector<Function *> SubtreeFunctions; 1445 for (Value *It : RawSubtreeValues) { 1446 Function *F = dyn_cast<Function>(It); 1447 if (F) { 1448 assert(isValidFunctionInKernel(F, AllowCUDALibDevice) && 1449 "Code should have bailed out by " 1450 "this point if an invalid function " 1451 "were present in a kernel."); 1452 SubtreeFunctions.insert(F); 1453 } 1454 } 1455 return SubtreeFunctions; 1456 } 1457 1458 std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>, 1459 isl::space> 1460 GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) { 1461 SetVector<Value *> SubtreeValues; 1462 SetVector<const SCEV *> SCEVs; 1463 SetVector<const Loop *> Loops; 1464 isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params(); 1465 SubtreeReferences References = { 1466 LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(), 1467 &ParamSpace}; 1468 1469 for (const auto &I : IDToValue) 1470 SubtreeValues.insert(I.second); 1471 1472 // NOTE: this is populated in IslNodeBuilder::addParameters 1473 // See [Code generation of induction variables of loops outside Scops]. 1474 for (const auto &I : OutsideLoopIterations) 1475 SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue()); 1476 1477 isl_ast_node_foreach_descendant_top_down( 1478 Kernel->tree, collectReferencesInGPUStmt, &References); 1479 1480 for (const SCEV *Expr : SCEVs) { 1481 findValues(Expr, SE, SubtreeValues); 1482 findLoops(Expr, Loops); 1483 } 1484 1485 Loops.remove_if([this](const Loop *L) { 1486 return S.contains(L) || L->contains(S.getEntry()); 1487 }); 1488 1489 for (auto &SAI : S.arrays()) 1490 SubtreeValues.remove(SAI->getBasePtr()); 1491 1492 isl_space *Space = S.getParamSpace().release(); 1493 for (long i = 0, n = isl_space_dim(Space, isl_dim_param); i < n; i++) { 1494 isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i); 1495 assert(IDToValue.count(Id)); 1496 Value *Val = IDToValue[Id]; 1497 SubtreeValues.remove(Val); 1498 isl_id_free(Id); 1499 } 1500 isl_space_free(Space); 1501 1502 for (long i = 0, n = isl_space_dim(Kernel->space, isl_dim_set); i < n; i++) { 1503 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i); 1504 assert(IDToValue.count(Id)); 1505 Value *Val = IDToValue[Id]; 1506 SubtreeValues.remove(Val); 1507 isl_id_free(Id); 1508 } 1509 1510 // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions 1511 // SubtreeValues. This is important, because we should not lose any 1512 // SubtreeValues in the process of constructing the 1513 // "ValidSubtree{Values, Functions} sets. Nor should the set 1514 // ValidSubtree{Values, Functions} have any common element. 1515 auto ValidSubtreeValuesIt = 1516 make_filter_range(SubtreeValues, isValidSubtreeValue); 1517 SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(), 1518 ValidSubtreeValuesIt.end()); 1519 1520 bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64; 1521 1522 SetVector<Function *> ValidSubtreeFunctions( 1523 getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice)); 1524 1525 // @see IslNodeBuilder::getReferencesInSubtree 1526 SetVector<Value *> ReplacedValues; 1527 for (Value *V : ValidSubtreeValues) { 1528 auto It = ValueMap.find(V); 1529 if (It == ValueMap.end()) 1530 ReplacedValues.insert(V); 1531 else 1532 ReplacedValues.insert(It->second); 1533 } 1534 return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops, 1535 ParamSpace); 1536 } 1537 1538 void GPUNodeBuilder::clearDominators(Function *F) { 1539 DomTreeNode *N = DT.getNode(&F->getEntryBlock()); 1540 std::vector<BasicBlock *> Nodes; 1541 for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I) 1542 Nodes.push_back(I->getBlock()); 1543 1544 for (BasicBlock *BB : Nodes) 1545 DT.eraseNode(BB); 1546 } 1547 1548 void GPUNodeBuilder::clearScalarEvolution(Function *F) { 1549 for (BasicBlock &BB : *F) { 1550 Loop *L = LI.getLoopFor(&BB); 1551 if (L) 1552 SE.forgetLoop(L); 1553 } 1554 } 1555 1556 void GPUNodeBuilder::clearLoops(Function *F) { 1557 SmallSet<Loop *, 1> WorkList; 1558 for (BasicBlock &BB : *F) { 1559 Loop *L = LI.getLoopFor(&BB); 1560 if (L) 1561 WorkList.insert(L); 1562 } 1563 for (auto *L : WorkList) 1564 LI.erase(L); 1565 } 1566 1567 std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) { 1568 std::vector<Value *> Sizes; 1569 isl::ast_build Context = isl::ast_build::from_context(S.getContext()); 1570 1571 isl::multi_pw_aff GridSizePwAffs = isl::manage_copy(Kernel->grid_size); 1572 for (long i = 0; i < Kernel->n_grid; i++) { 1573 isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i); 1574 isl::ast_expr GridSize = Context.expr_from(Size); 1575 Value *Res = ExprBuilder.create(GridSize.release()); 1576 Res = Builder.CreateTrunc(Res, Builder.getInt32Ty()); 1577 Sizes.push_back(Res); 1578 } 1579 1580 for (long i = Kernel->n_grid; i < 3; i++) 1581 Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1)); 1582 1583 return std::make_tuple(Sizes[0], Sizes[1]); 1584 } 1585 1586 std::tuple<Value *, Value *, Value *> 1587 GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) { 1588 std::vector<Value *> Sizes; 1589 1590 for (long i = 0; i < Kernel->n_block; i++) { 1591 Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]); 1592 Sizes.push_back(Res); 1593 } 1594 1595 for (long i = Kernel->n_block; i < 3; i++) 1596 Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1)); 1597 1598 return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]); 1599 } 1600 1601 void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters, 1602 Instruction *Param, int Index) { 1603 Value *Slot = Builder.CreateGEP( 1604 Parameters, {Builder.getInt64(0), Builder.getInt64(Index)}); 1605 Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy()); 1606 Builder.CreateStore(ParamTyped, Slot); 1607 } 1608 1609 Value * 1610 GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F, 1611 SetVector<Value *> SubtreeValues) { 1612 const int NumArgs = F->arg_size(); 1613 std::vector<int> ArgSizes(NumArgs); 1614 1615 // If we are using the OpenCL Runtime, we need to add the kernel argument 1616 // sizes to the end of the launch-parameter list, so OpenCL can determine 1617 // how big the respective kernel arguments are. 1618 // Here we need to reserve adequate space for that. 1619 Type *ArrayTy; 1620 if (Runtime == GPURuntime::OpenCL) 1621 ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs); 1622 else 1623 ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs); 1624 1625 BasicBlock *EntryBlock = 1626 &Builder.GetInsertBlock()->getParent()->getEntryBlock(); 1627 auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace(); 1628 std::string Launch = "polly_launch_" + std::to_string(Kernel->id); 1629 Instruction *Parameters = new AllocaInst( 1630 ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator()); 1631 1632 int Index = 0; 1633 for (long i = 0; i < Prog->n_array; i++) { 1634 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 1635 continue; 1636 1637 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 1638 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id)); 1639 1640 if (Runtime == GPURuntime::OpenCL) 1641 ArgSizes[Index] = SAI->getElemSizeInBytes(); 1642 1643 Value *DevArray = nullptr; 1644 if (PollyManagedMemory) { 1645 DevArray = getManagedDeviceArray(&Prog->array[i], 1646 const_cast<ScopArrayInfo *>(SAI)); 1647 } else { 1648 DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)]; 1649 DevArray = createCallGetDevicePtr(DevArray); 1650 } 1651 assert(DevArray != nullptr && "Array to be offloaded to device not " 1652 "initialized"); 1653 Value *Offset = getArrayOffset(&Prog->array[i]); 1654 1655 if (Offset) { 1656 DevArray = Builder.CreatePointerCast( 1657 DevArray, SAI->getElementType()->getPointerTo()); 1658 DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset)); 1659 DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy()); 1660 } 1661 Value *Slot = Builder.CreateGEP( 1662 Parameters, {Builder.getInt64(0), Builder.getInt64(Index)}); 1663 1664 if (gpu_array_is_read_only_scalar(&Prog->array[i])) { 1665 Value *ValPtr = nullptr; 1666 if (PollyManagedMemory) 1667 ValPtr = DevArray; 1668 else 1669 ValPtr = BlockGen.getOrCreateAlloca(SAI); 1670 1671 assert(ValPtr != nullptr && "ValPtr that should point to a valid object" 1672 " to be stored into Parameters"); 1673 Value *ValPtrCast = 1674 Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy()); 1675 Builder.CreateStore(ValPtrCast, Slot); 1676 } else { 1677 Instruction *Param = 1678 new AllocaInst(Builder.getInt8PtrTy(), AddressSpace, 1679 Launch + "_param_" + std::to_string(Index), 1680 EntryBlock->getTerminator()); 1681 Builder.CreateStore(DevArray, Param); 1682 Value *ParamTyped = 1683 Builder.CreatePointerCast(Param, Builder.getInt8PtrTy()); 1684 Builder.CreateStore(ParamTyped, Slot); 1685 } 1686 Index++; 1687 } 1688 1689 int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set); 1690 1691 for (long i = 0; i < NumHostIters; i++) { 1692 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i); 1693 Value *Val = IDToValue[Id]; 1694 isl_id_free(Id); 1695 1696 if (Runtime == GPURuntime::OpenCL) 1697 ArgSizes[Index] = computeSizeInBytes(Val->getType()); 1698 1699 Instruction *Param = 1700 new AllocaInst(Val->getType(), AddressSpace, 1701 Launch + "_param_" + std::to_string(Index), 1702 EntryBlock->getTerminator()); 1703 Builder.CreateStore(Val, Param); 1704 insertStoreParameter(Parameters, Param, Index); 1705 Index++; 1706 } 1707 1708 int NumVars = isl_space_dim(Kernel->space, isl_dim_param); 1709 1710 for (long i = 0; i < NumVars; i++) { 1711 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i); 1712 Value *Val = IDToValue[Id]; 1713 if (ValueMap.count(Val)) 1714 Val = ValueMap[Val]; 1715 isl_id_free(Id); 1716 1717 if (Runtime == GPURuntime::OpenCL) 1718 ArgSizes[Index] = computeSizeInBytes(Val->getType()); 1719 1720 Instruction *Param = 1721 new AllocaInst(Val->getType(), AddressSpace, 1722 Launch + "_param_" + std::to_string(Index), 1723 EntryBlock->getTerminator()); 1724 Builder.CreateStore(Val, Param); 1725 insertStoreParameter(Parameters, Param, Index); 1726 Index++; 1727 } 1728 1729 for (auto Val : SubtreeValues) { 1730 if (Runtime == GPURuntime::OpenCL) 1731 ArgSizes[Index] = computeSizeInBytes(Val->getType()); 1732 1733 Instruction *Param = 1734 new AllocaInst(Val->getType(), AddressSpace, 1735 Launch + "_param_" + std::to_string(Index), 1736 EntryBlock->getTerminator()); 1737 Builder.CreateStore(Val, Param); 1738 insertStoreParameter(Parameters, Param, Index); 1739 Index++; 1740 } 1741 1742 if (Runtime == GPURuntime::OpenCL) { 1743 for (int i = 0; i < NumArgs; i++) { 1744 Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]); 1745 Instruction *Param = 1746 new AllocaInst(Builder.getInt32Ty(), AddressSpace, 1747 Launch + "_param_size_" + std::to_string(i), 1748 EntryBlock->getTerminator()); 1749 Builder.CreateStore(Val, Param); 1750 insertStoreParameter(Parameters, Param, Index); 1751 Index++; 1752 } 1753 } 1754 1755 auto Location = EntryBlock->getTerminator(); 1756 return new BitCastInst(Parameters, Builder.getInt8PtrTy(), 1757 Launch + "_params_i8ptr", Location); 1758 } 1759 1760 void GPUNodeBuilder::setupKernelSubtreeFunctions( 1761 SetVector<Function *> SubtreeFunctions) { 1762 for (auto Fn : SubtreeFunctions) { 1763 const std::string ClonedFnName = Fn->getName(); 1764 Function *Clone = GPUModule->getFunction(ClonedFnName); 1765 if (!Clone) 1766 Clone = 1767 Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage, 1768 ClonedFnName, GPUModule.get()); 1769 assert(Clone && "Expected cloned function to be initialized."); 1770 assert(ValueMap.find(Fn) == ValueMap.end() && 1771 "Fn already present in ValueMap"); 1772 ValueMap[Fn] = Clone; 1773 } 1774 } 1775 void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) { 1776 isl_id *Id = isl_ast_node_get_annotation(KernelStmt); 1777 ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id); 1778 isl_id_free(Id); 1779 isl_ast_node_free(KernelStmt); 1780 1781 if (Kernel->n_grid > 1) 1782 DeepestParallel = 1783 std::max(DeepestParallel, isl_space_dim(Kernel->space, isl_dim_set)); 1784 else 1785 DeepestSequential = 1786 std::max(DeepestSequential, isl_space_dim(Kernel->space, isl_dim_set)); 1787 1788 Value *BlockDimX, *BlockDimY, *BlockDimZ; 1789 std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel); 1790 1791 SetVector<Value *> SubtreeValues; 1792 SetVector<Function *> SubtreeFunctions; 1793 SetVector<const Loop *> Loops; 1794 isl::space ParamSpace; 1795 std::tie(SubtreeValues, SubtreeFunctions, Loops, ParamSpace) = 1796 getReferencesInKernel(Kernel); 1797 1798 // Add parameters that appear only in the access function to the kernel 1799 // space. This is important to make sure that all isl_ids are passed as 1800 // parameters to the kernel, even though we may not have all parameters 1801 // in the context to improve compile time. 1802 Kernel->space = isl_space_align_params(Kernel->space, ParamSpace.release()); 1803 1804 assert(Kernel->tree && "Device AST of kernel node is empty"); 1805 1806 Instruction &HostInsertPoint = *Builder.GetInsertPoint(); 1807 IslExprBuilder::IDToValueTy HostIDs = IDToValue; 1808 ValueMapT HostValueMap = ValueMap; 1809 BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap; 1810 ScalarMap.clear(); 1811 BlockGenerator::EscapeUsersAllocaMapTy HostEscapeMap = EscapeMap; 1812 EscapeMap.clear(); 1813 1814 // Create for all loops we depend on values that contain the current loop 1815 // iteration. These values are necessary to generate code for SCEVs that 1816 // depend on such loops. As a result we need to pass them to the subfunction. 1817 for (const Loop *L : Loops) { 1818 const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)), 1819 SE.getUnknown(Builder.getInt64(1)), 1820 L, SCEV::FlagAnyWrap); 1821 Value *V = generateSCEV(OuterLIV); 1822 OutsideLoopIterations[L] = SE.getUnknown(V); 1823 SubtreeValues.insert(V); 1824 } 1825 1826 createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions); 1827 setupKernelSubtreeFunctions(SubtreeFunctions); 1828 1829 create(isl_ast_node_copy(Kernel->tree)); 1830 1831 finalizeKernelArguments(Kernel); 1832 Function *F = Builder.GetInsertBlock()->getParent(); 1833 if (Arch == GPUArch::NVPTX64) 1834 addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ); 1835 clearDominators(F); 1836 clearScalarEvolution(F); 1837 clearLoops(F); 1838 1839 IDToValue = HostIDs; 1840 1841 ValueMap = std::move(HostValueMap); 1842 ScalarMap = std::move(HostScalarMap); 1843 EscapeMap = std::move(HostEscapeMap); 1844 IDToSAI.clear(); 1845 Annotator.resetAlternativeAliasBases(); 1846 for (auto &BasePtr : LocalArrays) 1847 S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array); 1848 LocalArrays.clear(); 1849 1850 std::string ASMString = finalizeKernelFunction(); 1851 Builder.SetInsertPoint(&HostInsertPoint); 1852 Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues); 1853 1854 std::string Name = getKernelFuncName(Kernel->id); 1855 Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name); 1856 Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name"); 1857 Value *GPUKernel = createCallGetKernel(KernelString, NameString); 1858 1859 Value *GridDimX, *GridDimY; 1860 std::tie(GridDimX, GridDimY) = getGridSizes(Kernel); 1861 1862 createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY, 1863 BlockDimZ, Parameters); 1864 createCallFreeKernel(GPUKernel); 1865 1866 for (auto Id : KernelIds) 1867 isl_id_free(Id); 1868 1869 KernelIds.clear(); 1870 } 1871 1872 /// Compute the DataLayout string for the NVPTX backend. 1873 /// 1874 /// @param is64Bit Are we looking for a 64 bit architecture? 1875 static std::string computeNVPTXDataLayout(bool is64Bit) { 1876 std::string Ret = ""; 1877 1878 if (!is64Bit) { 1879 Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1880 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:" 1881 "64-v128:128:128-n16:32:64"; 1882 } else { 1883 Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1884 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:" 1885 "64-v128:128:128-n16:32:64"; 1886 } 1887 1888 return Ret; 1889 } 1890 1891 /// Compute the DataLayout string for a SPIR kernel. 1892 /// 1893 /// @param is64Bit Are we looking for a 64 bit architecture? 1894 static std::string computeSPIRDataLayout(bool is64Bit) { 1895 std::string Ret = ""; 1896 1897 if (!is64Bit) { 1898 Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1899 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:" 1900 "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:" 1901 "256:256-v256:256:256-v512:512:512-v1024:1024:1024"; 1902 } else { 1903 Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1904 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:" 1905 "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:" 1906 "256:256-v256:256:256-v512:512:512-v1024:1024:1024"; 1907 } 1908 1909 return Ret; 1910 } 1911 1912 Function * 1913 GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel, 1914 SetVector<Value *> &SubtreeValues) { 1915 std::vector<Type *> Args; 1916 std::string Identifier = getKernelFuncName(Kernel->id); 1917 1918 std::vector<Metadata *> MemoryType; 1919 1920 for (long i = 0; i < Prog->n_array; i++) { 1921 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 1922 continue; 1923 1924 if (gpu_array_is_read_only_scalar(&Prog->array[i])) { 1925 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 1926 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id)); 1927 Args.push_back(SAI->getElementType()); 1928 MemoryType.push_back( 1929 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1930 } else { 1931 static const int UseGlobalMemory = 1; 1932 Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory)); 1933 MemoryType.push_back( 1934 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1))); 1935 } 1936 } 1937 1938 int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set); 1939 1940 for (long i = 0; i < NumHostIters; i++) { 1941 Args.push_back(Builder.getInt64Ty()); 1942 MemoryType.push_back( 1943 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1944 } 1945 1946 int NumVars = isl_space_dim(Kernel->space, isl_dim_param); 1947 1948 for (long i = 0; i < NumVars; i++) { 1949 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i); 1950 Value *Val = IDToValue[Id]; 1951 isl_id_free(Id); 1952 Args.push_back(Val->getType()); 1953 MemoryType.push_back( 1954 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1955 } 1956 1957 for (auto *V : SubtreeValues) { 1958 Args.push_back(V->getType()); 1959 MemoryType.push_back( 1960 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1961 } 1962 1963 auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false); 1964 auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier, 1965 GPUModule.get()); 1966 1967 std::vector<Metadata *> EmptyStrings; 1968 1969 for (unsigned int i = 0; i < MemoryType.size(); i++) { 1970 EmptyStrings.push_back(MDString::get(FN->getContext(), "")); 1971 } 1972 1973 if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) { 1974 FN->setMetadata("kernel_arg_addr_space", 1975 MDNode::get(FN->getContext(), MemoryType)); 1976 FN->setMetadata("kernel_arg_name", 1977 MDNode::get(FN->getContext(), EmptyStrings)); 1978 FN->setMetadata("kernel_arg_access_qual", 1979 MDNode::get(FN->getContext(), EmptyStrings)); 1980 FN->setMetadata("kernel_arg_type", 1981 MDNode::get(FN->getContext(), EmptyStrings)); 1982 FN->setMetadata("kernel_arg_type_qual", 1983 MDNode::get(FN->getContext(), EmptyStrings)); 1984 FN->setMetadata("kernel_arg_base_type", 1985 MDNode::get(FN->getContext(), EmptyStrings)); 1986 } 1987 1988 switch (Arch) { 1989 case GPUArch::NVPTX64: 1990 FN->setCallingConv(CallingConv::PTX_Kernel); 1991 break; 1992 case GPUArch::SPIR32: 1993 case GPUArch::SPIR64: 1994 FN->setCallingConv(CallingConv::SPIR_KERNEL); 1995 break; 1996 } 1997 1998 auto Arg = FN->arg_begin(); 1999 for (long i = 0; i < Kernel->n_array; i++) { 2000 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 2001 continue; 2002 2003 Arg->setName(Kernel->array[i].array->name); 2004 2005 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 2006 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id)); 2007 Type *EleTy = SAI->getElementType(); 2008 Value *Val = &*Arg; 2009 SmallVector<const SCEV *, 4> Sizes; 2010 isl_ast_build *Build = 2011 isl_ast_build_from_context(isl_set_copy(Prog->context)); 2012 Sizes.push_back(nullptr); 2013 for (long j = 1, n = Kernel->array[i].array->n_index; j < n; j++) { 2014 isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff( 2015 Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j)); 2016 auto V = ExprBuilder.create(DimSize); 2017 Sizes.push_back(SE.getSCEV(V)); 2018 } 2019 const ScopArrayInfo *SAIRep = 2020 S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array); 2021 LocalArrays.push_back(Val); 2022 2023 isl_ast_build_free(Build); 2024 KernelIds.push_back(Id); 2025 IDToSAI[Id] = SAIRep; 2026 Arg++; 2027 } 2028 2029 for (long i = 0; i < NumHostIters; i++) { 2030 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i); 2031 Arg->setName(isl_id_get_name(Id)); 2032 IDToValue[Id] = &*Arg; 2033 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2034 Arg++; 2035 } 2036 2037 for (long i = 0; i < NumVars; i++) { 2038 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i); 2039 Arg->setName(isl_id_get_name(Id)); 2040 Value *Val = IDToValue[Id]; 2041 ValueMap[Val] = &*Arg; 2042 IDToValue[Id] = &*Arg; 2043 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2044 Arg++; 2045 } 2046 2047 for (auto *V : SubtreeValues) { 2048 Arg->setName(V->getName()); 2049 ValueMap[V] = &*Arg; 2050 Arg++; 2051 } 2052 2053 return FN; 2054 } 2055 2056 void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) { 2057 Intrinsic::ID IntrinsicsBID[2]; 2058 Intrinsic::ID IntrinsicsTID[3]; 2059 2060 switch (Arch) { 2061 case GPUArch::SPIR64: 2062 case GPUArch::SPIR32: 2063 llvm_unreachable("Cannot generate NVVM intrinsics for SPIR"); 2064 case GPUArch::NVPTX64: 2065 IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x; 2066 IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y; 2067 2068 IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x; 2069 IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y; 2070 IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z; 2071 break; 2072 } 2073 2074 auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable { 2075 std::string Name = isl_id_get_name(Id); 2076 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 2077 Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr); 2078 Value *Val = Builder.CreateCall(IntrinsicFn, {}); 2079 Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name); 2080 IDToValue[Id] = Val; 2081 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2082 }; 2083 2084 for (int i = 0; i < Kernel->n_grid; ++i) { 2085 isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i); 2086 addId(Id, IntrinsicsBID[i]); 2087 } 2088 2089 for (int i = 0; i < Kernel->n_block; ++i) { 2090 isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i); 2091 addId(Id, IntrinsicsTID[i]); 2092 } 2093 } 2094 2095 void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel, 2096 bool SizeTypeIs64bit) { 2097 const char *GroupName[3] = {"__gen_ocl_get_group_id0", 2098 "__gen_ocl_get_group_id1", 2099 "__gen_ocl_get_group_id2"}; 2100 2101 const char *LocalName[3] = {"__gen_ocl_get_local_id0", 2102 "__gen_ocl_get_local_id1", 2103 "__gen_ocl_get_local_id2"}; 2104 IntegerType *SizeT = 2105 SizeTypeIs64bit ? Builder.getInt64Ty() : Builder.getInt32Ty(); 2106 2107 auto createFunc = [this](const char *Name, __isl_take isl_id *Id, 2108 IntegerType *SizeT) mutable { 2109 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 2110 Function *FN = M->getFunction(Name); 2111 2112 // If FN is not available, declare it. 2113 if (!FN) { 2114 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 2115 std::vector<Type *> Args; 2116 FunctionType *Ty = FunctionType::get(SizeT, Args, false); 2117 FN = Function::Create(Ty, Linkage, Name, M); 2118 FN->setCallingConv(CallingConv::SPIR_FUNC); 2119 } 2120 2121 Value *Val = Builder.CreateCall(FN, {}); 2122 if (SizeT == Builder.getInt32Ty()) 2123 Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name); 2124 IDToValue[Id] = Val; 2125 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2126 }; 2127 2128 for (int i = 0; i < Kernel->n_grid; ++i) 2129 createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i), SizeT); 2130 2131 for (int i = 0; i < Kernel->n_block; ++i) 2132 createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i), SizeT); 2133 } 2134 2135 void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) { 2136 auto Arg = FN->arg_begin(); 2137 for (long i = 0; i < Kernel->n_array; i++) { 2138 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 2139 continue; 2140 2141 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 2142 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id)); 2143 isl_id_free(Id); 2144 2145 if (SAI->getNumberOfDimensions() > 0) { 2146 Arg++; 2147 continue; 2148 } 2149 2150 Value *Val = &*Arg; 2151 2152 if (!gpu_array_is_read_only_scalar(&Prog->array[i])) { 2153 Type *TypePtr = SAI->getElementType()->getPointerTo(); 2154 Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr); 2155 Val = Builder.CreateLoad(TypedArgPtr); 2156 } 2157 2158 Value *Alloca = BlockGen.getOrCreateAlloca(SAI); 2159 Builder.CreateStore(Val, Alloca); 2160 2161 Arg++; 2162 } 2163 } 2164 2165 void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) { 2166 auto *FN = Builder.GetInsertBlock()->getParent(); 2167 auto Arg = FN->arg_begin(); 2168 2169 bool StoredScalar = false; 2170 for (long i = 0; i < Kernel->n_array; i++) { 2171 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 2172 continue; 2173 2174 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 2175 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage_copy(Id)); 2176 isl_id_free(Id); 2177 2178 if (SAI->getNumberOfDimensions() > 0) { 2179 Arg++; 2180 continue; 2181 } 2182 2183 if (gpu_array_is_read_only_scalar(&Prog->array[i])) { 2184 Arg++; 2185 continue; 2186 } 2187 2188 Value *Alloca = BlockGen.getOrCreateAlloca(SAI); 2189 Value *ArgPtr = &*Arg; 2190 Type *TypePtr = SAI->getElementType()->getPointerTo(); 2191 Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr); 2192 Value *Val = Builder.CreateLoad(Alloca); 2193 Builder.CreateStore(Val, TypedArgPtr); 2194 StoredScalar = true; 2195 2196 Arg++; 2197 } 2198 2199 if (StoredScalar) { 2200 /// In case more than one thread contains scalar stores, the generated 2201 /// code might be incorrect, if we only store at the end of the kernel. 2202 /// To support this case we need to store these scalars back at each 2203 /// memory store or at least before each kernel barrier. 2204 if (Kernel->n_block != 0 || Kernel->n_grid != 0) { 2205 BuildSuccessful = 0; 2206 LLVM_DEBUG( 2207 dbgs() << getUniqueScopName(&S) 2208 << " has a store to a scalar value that" 2209 " would be undefined to run in parallel. Bailing out.\n";); 2210 } 2211 } 2212 } 2213 2214 void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) { 2215 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 2216 2217 for (int i = 0; i < Kernel->n_var; ++i) { 2218 struct ppcg_kernel_var &Var = Kernel->var[i]; 2219 isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set); 2220 Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType(); 2221 2222 Type *ArrayTy = EleTy; 2223 SmallVector<const SCEV *, 4> Sizes; 2224 2225 Sizes.push_back(nullptr); 2226 for (unsigned int j = 1; j < Var.array->n_index; ++j) { 2227 isl_val *Val = isl_vec_get_element_val(Var.size, j); 2228 long Bound = isl_val_get_num_si(Val); 2229 isl_val_free(Val); 2230 Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound)); 2231 } 2232 2233 for (int j = Var.array->n_index - 1; j >= 0; --j) { 2234 isl_val *Val = isl_vec_get_element_val(Var.size, j); 2235 long Bound = isl_val_get_num_si(Val); 2236 isl_val_free(Val); 2237 ArrayTy = ArrayType::get(ArrayTy, Bound); 2238 } 2239 2240 const ScopArrayInfo *SAI; 2241 Value *Allocation; 2242 if (Var.type == ppcg_access_shared) { 2243 auto GlobalVar = new GlobalVariable( 2244 *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name, 2245 nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3); 2246 GlobalVar->setAlignment(llvm::Align(EleTy->getPrimitiveSizeInBits() / 8)); 2247 GlobalVar->setInitializer(Constant::getNullValue(ArrayTy)); 2248 2249 Allocation = GlobalVar; 2250 } else if (Var.type == ppcg_access_private) { 2251 Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array"); 2252 } else { 2253 llvm_unreachable("unknown variable type"); 2254 } 2255 SAI = 2256 S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array); 2257 Id = isl_id_alloc(S.getIslCtx().get(), Var.name, nullptr); 2258 IDToValue[Id] = Allocation; 2259 LocalArrays.push_back(Allocation); 2260 KernelIds.push_back(Id); 2261 IDToSAI[Id] = SAI; 2262 } 2263 } 2264 2265 void GPUNodeBuilder::createKernelFunction( 2266 ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues, 2267 SetVector<Function *> &SubtreeFunctions) { 2268 std::string Identifier = getKernelFuncName(Kernel->id); 2269 GPUModule.reset(new Module(Identifier, Builder.getContext())); 2270 2271 switch (Arch) { 2272 case GPUArch::NVPTX64: 2273 if (Runtime == GPURuntime::CUDA) 2274 GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda")); 2275 else if (Runtime == GPURuntime::OpenCL) 2276 GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl")); 2277 GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */)); 2278 break; 2279 case GPUArch::SPIR32: 2280 GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown")); 2281 GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */)); 2282 break; 2283 case GPUArch::SPIR64: 2284 GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown")); 2285 GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */)); 2286 break; 2287 } 2288 2289 Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues); 2290 2291 BasicBlock *PrevBlock = Builder.GetInsertBlock(); 2292 auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN); 2293 2294 DT.addNewBlock(EntryBlock, PrevBlock); 2295 2296 Builder.SetInsertPoint(EntryBlock); 2297 Builder.CreateRetVoid(); 2298 Builder.SetInsertPoint(EntryBlock, EntryBlock->begin()); 2299 2300 ScopDetection::markFunctionAsInvalid(FN); 2301 2302 prepareKernelArguments(Kernel, FN); 2303 createKernelVariables(Kernel, FN); 2304 2305 switch (Arch) { 2306 case GPUArch::NVPTX64: 2307 insertKernelIntrinsics(Kernel); 2308 break; 2309 case GPUArch::SPIR32: 2310 insertKernelCallsSPIR(Kernel, false); 2311 break; 2312 case GPUArch::SPIR64: 2313 insertKernelCallsSPIR(Kernel, true); 2314 break; 2315 } 2316 } 2317 2318 std::string GPUNodeBuilder::createKernelASM() { 2319 llvm::Triple GPUTriple; 2320 2321 switch (Arch) { 2322 case GPUArch::NVPTX64: 2323 switch (Runtime) { 2324 case GPURuntime::CUDA: 2325 GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda")); 2326 break; 2327 case GPURuntime::OpenCL: 2328 GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl")); 2329 break; 2330 } 2331 break; 2332 case GPUArch::SPIR64: 2333 case GPUArch::SPIR32: 2334 std::string SPIRAssembly; 2335 raw_string_ostream IROstream(SPIRAssembly); 2336 IROstream << *GPUModule; 2337 IROstream.flush(); 2338 return SPIRAssembly; 2339 } 2340 2341 std::string ErrMsg; 2342 auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg); 2343 2344 if (!GPUTarget) { 2345 errs() << ErrMsg << "\n"; 2346 return ""; 2347 } 2348 2349 TargetOptions Options; 2350 Options.UnsafeFPMath = FastMath; 2351 2352 std::string subtarget; 2353 2354 switch (Arch) { 2355 case GPUArch::NVPTX64: 2356 subtarget = CudaVersion; 2357 break; 2358 case GPUArch::SPIR32: 2359 case GPUArch::SPIR64: 2360 llvm_unreachable("No subtarget for SPIR architecture"); 2361 } 2362 2363 std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine( 2364 GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>())); 2365 2366 SmallString<0> ASMString; 2367 raw_svector_ostream ASMStream(ASMString); 2368 llvm::legacy::PassManager PM; 2369 2370 PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis())); 2371 2372 if (TargetM->addPassesToEmitFile(PM, ASMStream, nullptr, CGFT_AssemblyFile, 2373 true /* verify */)) { 2374 errs() << "The target does not support generation of this file type!\n"; 2375 return ""; 2376 } 2377 2378 PM.run(*GPUModule); 2379 2380 return ASMStream.str(); 2381 } 2382 2383 bool GPUNodeBuilder::requiresCUDALibDevice() { 2384 bool RequiresLibDevice = false; 2385 for (Function &F : GPUModule->functions()) { 2386 if (!F.isDeclaration()) 2387 continue; 2388 2389 const std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(F.getName()); 2390 if (CUDALibDeviceFunc.length() != 0) { 2391 // We need to handle the case where a module looks like this: 2392 // @expf(..) 2393 // @llvm.exp.f64(..) 2394 // Both of these functions would be renamed to `__nv_expf`. 2395 // 2396 // So, we must first check for the existence of the libdevice function. 2397 // If this exists, we replace our current function with it. 2398 // 2399 // If it does not exist, we rename the current function to the 2400 // libdevice functiono name. 2401 if (Function *Replacement = F.getParent()->getFunction(CUDALibDeviceFunc)) 2402 F.replaceAllUsesWith(Replacement); 2403 else 2404 F.setName(CUDALibDeviceFunc); 2405 RequiresLibDevice = true; 2406 } 2407 } 2408 2409 return RequiresLibDevice; 2410 } 2411 2412 void GPUNodeBuilder::addCUDALibDevice() { 2413 if (Arch != GPUArch::NVPTX64) 2414 return; 2415 2416 if (requiresCUDALibDevice()) { 2417 SMDiagnostic Error; 2418 2419 errs() << CUDALibDevice << "\n"; 2420 auto LibDeviceModule = 2421 parseIRFile(CUDALibDevice, Error, GPUModule->getContext()); 2422 2423 if (!LibDeviceModule) { 2424 BuildSuccessful = false; 2425 report_fatal_error("Could not find or load libdevice. Skipping GPU " 2426 "kernel generation. Please set -polly-acc-libdevice " 2427 "accordingly.\n"); 2428 return; 2429 } 2430 2431 Linker L(*GPUModule); 2432 2433 // Set an nvptx64 target triple to avoid linker warnings. The original 2434 // triple of the libdevice files are nvptx-unknown-unknown. 2435 LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda")); 2436 L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded); 2437 } 2438 } 2439 2440 std::string GPUNodeBuilder::finalizeKernelFunction() { 2441 2442 if (verifyModule(*GPUModule)) { 2443 LLVM_DEBUG(dbgs() << "verifyModule failed on module:\n"; 2444 GPUModule->print(dbgs(), nullptr); dbgs() << "\n";); 2445 LLVM_DEBUG(dbgs() << "verifyModule Error:\n"; 2446 verifyModule(*GPUModule, &dbgs());); 2447 2448 if (FailOnVerifyModuleFailure) 2449 llvm_unreachable("VerifyModule failed."); 2450 2451 BuildSuccessful = false; 2452 return ""; 2453 } 2454 2455 addCUDALibDevice(); 2456 2457 if (DumpKernelIR) 2458 outs() << *GPUModule << "\n"; 2459 2460 if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) { 2461 // Optimize module. 2462 llvm::legacy::PassManager OptPasses; 2463 PassManagerBuilder PassBuilder; 2464 PassBuilder.OptLevel = 3; 2465 PassBuilder.SizeLevel = 0; 2466 PassBuilder.populateModulePassManager(OptPasses); 2467 OptPasses.run(*GPUModule); 2468 } 2469 2470 std::string Assembly = createKernelASM(); 2471 2472 if (DumpKernelASM) 2473 outs() << Assembly << "\n"; 2474 2475 GPUModule.release(); 2476 KernelIDs.clear(); 2477 2478 return Assembly; 2479 } 2480 /// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff` 2481 /// @param PwAffs The list of piecewise affine functions to create an 2482 /// `isl_pw_aff_list` from. We expect an rvalue ref because 2483 /// all the isl_pw_aff are used up by this function. 2484 /// 2485 /// @returns The `isl_pw_aff_list`. 2486 __isl_give isl_pw_aff_list * 2487 createPwAffList(isl_ctx *Context, 2488 const std::vector<__isl_take isl_pw_aff *> &&PwAffs) { 2489 isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size()); 2490 2491 for (unsigned i = 0; i < PwAffs.size(); i++) { 2492 List = isl_pw_aff_list_insert(List, i, PwAffs[i]); 2493 } 2494 return List; 2495 } 2496 2497 /// Align all the `PwAffs` such that they have the same parameter dimensions. 2498 /// 2499 /// We loop over all `pw_aff` and align all of their spaces together to 2500 /// create a common space for all the `pw_aff`. This common space is the 2501 /// `AlignSpace`. We then align all the `pw_aff` to this space. We start 2502 /// with the given `SeedSpace`. 2503 /// @param PwAffs The list of piecewise affine functions we want to align. 2504 /// This is an rvalue reference because the entire vector is 2505 /// used up by the end of the operation. 2506 /// @param SeedSpace The space to start the alignment process with. 2507 /// @returns A std::pair, whose first element is the aligned space, 2508 /// whose second element is the vector of aligned piecewise 2509 /// affines. 2510 static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>> 2511 alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs, 2512 __isl_take isl_space *SeedSpace) { 2513 assert(SeedSpace && "Invalid seed space given."); 2514 2515 isl_space *AlignSpace = SeedSpace; 2516 for (isl_pw_aff *PwAff : PwAffs) { 2517 isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff); 2518 AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace); 2519 } 2520 std::vector<isl_pw_aff *> AdjustedPwAffs; 2521 2522 for (unsigned i = 0; i < PwAffs.size(); i++) { 2523 isl_pw_aff *Adjusted = PwAffs[i]; 2524 assert(Adjusted && "Invalid pw_aff given."); 2525 Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace)); 2526 AdjustedPwAffs.push_back(Adjusted); 2527 } 2528 return std::make_pair(AlignSpace, AdjustedPwAffs); 2529 } 2530 2531 namespace { 2532 class PPCGCodeGeneration : public ScopPass { 2533 public: 2534 static char ID; 2535 2536 GPURuntime Runtime = GPURuntime::CUDA; 2537 2538 GPUArch Architecture = GPUArch::NVPTX64; 2539 2540 /// The scop that is currently processed. 2541 Scop *S; 2542 2543 LoopInfo *LI; 2544 DominatorTree *DT; 2545 ScalarEvolution *SE; 2546 const DataLayout *DL; 2547 RegionInfo *RI; 2548 2549 PPCGCodeGeneration() : ScopPass(ID) {} 2550 2551 /// Construct compilation options for PPCG. 2552 /// 2553 /// @returns The compilation options. 2554 ppcg_options *createPPCGOptions() { 2555 auto DebugOptions = 2556 (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options)); 2557 auto Options = (ppcg_options *)malloc(sizeof(ppcg_options)); 2558 2559 DebugOptions->dump_schedule_constraints = false; 2560 DebugOptions->dump_schedule = false; 2561 DebugOptions->dump_final_schedule = false; 2562 DebugOptions->dump_sizes = false; 2563 DebugOptions->verbose = false; 2564 2565 Options->debug = DebugOptions; 2566 2567 Options->group_chains = false; 2568 Options->reschedule = true; 2569 Options->scale_tile_loops = false; 2570 Options->wrap = false; 2571 2572 Options->non_negative_parameters = false; 2573 Options->ctx = nullptr; 2574 Options->sizes = nullptr; 2575 2576 Options->tile = true; 2577 Options->tile_size = 32; 2578 2579 Options->isolate_full_tiles = false; 2580 2581 Options->use_private_memory = PrivateMemory; 2582 Options->use_shared_memory = SharedMemory; 2583 Options->max_shared_memory = 48 * 1024; 2584 2585 Options->target = PPCG_TARGET_CUDA; 2586 Options->openmp = false; 2587 Options->linearize_device_arrays = true; 2588 Options->allow_gnu_extensions = false; 2589 2590 Options->unroll_copy_shared = false; 2591 Options->unroll_gpu_tile = false; 2592 Options->live_range_reordering = true; 2593 2594 Options->live_range_reordering = true; 2595 Options->hybrid = false; 2596 Options->opencl_compiler_options = nullptr; 2597 Options->opencl_use_gpu = false; 2598 Options->opencl_n_include_file = 0; 2599 Options->opencl_include_files = nullptr; 2600 Options->opencl_print_kernel_types = false; 2601 Options->opencl_embed_kernel_code = false; 2602 2603 Options->save_schedule_file = nullptr; 2604 Options->load_schedule_file = nullptr; 2605 2606 return Options; 2607 } 2608 2609 /// Get a tagged access relation containing all accesses of type @p AccessTy. 2610 /// 2611 /// Instead of a normal access of the form: 2612 /// 2613 /// Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)] 2614 /// 2615 /// a tagged access has the form 2616 /// 2617 /// [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)] 2618 /// 2619 /// where 'id' is an additional space that references the memory access that 2620 /// triggered the access. 2621 /// 2622 /// @param AccessTy The type of the memory accesses to collect. 2623 /// 2624 /// @return The relation describing all tagged memory accesses. 2625 isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) { 2626 isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace().release()); 2627 2628 for (auto &Stmt : *S) 2629 for (auto &Acc : Stmt) 2630 if (Acc->getType() == AccessTy) { 2631 isl_map *Relation = Acc->getAccessRelation().release(); 2632 Relation = 2633 isl_map_intersect_domain(Relation, Stmt.getDomain().release()); 2634 2635 isl_space *Space = isl_map_get_space(Relation); 2636 Space = isl_space_range(Space); 2637 Space = isl_space_from_range(Space); 2638 Space = 2639 isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release()); 2640 isl_map *Universe = isl_map_universe(Space); 2641 Relation = isl_map_domain_product(Relation, Universe); 2642 Accesses = isl_union_map_add_map(Accesses, Relation); 2643 } 2644 2645 return Accesses; 2646 } 2647 2648 /// Get the set of all read accesses, tagged with the access id. 2649 /// 2650 /// @see getTaggedAccesses 2651 isl_union_map *getTaggedReads() { 2652 return getTaggedAccesses(MemoryAccess::READ); 2653 } 2654 2655 /// Get the set of all may (and must) accesses, tagged with the access id. 2656 /// 2657 /// @see getTaggedAccesses 2658 isl_union_map *getTaggedMayWrites() { 2659 return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE), 2660 getTaggedAccesses(MemoryAccess::MUST_WRITE)); 2661 } 2662 2663 /// Get the set of all must accesses, tagged with the access id. 2664 /// 2665 /// @see getTaggedAccesses 2666 isl_union_map *getTaggedMustWrites() { 2667 return getTaggedAccesses(MemoryAccess::MUST_WRITE); 2668 } 2669 2670 /// Collect parameter and array names as isl_ids. 2671 /// 2672 /// To reason about the different parameters and arrays used, ppcg requires 2673 /// a list of all isl_ids in use. As PPCG traditionally performs 2674 /// source-to-source compilation each of these isl_ids is mapped to the 2675 /// expression that represents it. As we do not have a corresponding 2676 /// expression in Polly, we just map each id to a 'zero' expression to match 2677 /// the data format that ppcg expects. 2678 /// 2679 /// @returns Retun a map from collected ids to 'zero' ast expressions. 2680 __isl_give isl_id_to_ast_expr *getNames() { 2681 auto *Names = isl_id_to_ast_expr_alloc( 2682 S->getIslCtx().get(), 2683 S->getNumParams() + std::distance(S->array_begin(), S->array_end())); 2684 auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx().get())); 2685 2686 for (const SCEV *P : S->parameters()) { 2687 isl_id *Id = S->getIdForParam(P).release(); 2688 Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero)); 2689 } 2690 2691 for (auto &Array : S->arrays()) { 2692 auto Id = Array->getBasePtrId().release(); 2693 Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero)); 2694 } 2695 2696 isl_ast_expr_free(Zero); 2697 2698 return Names; 2699 } 2700 2701 /// Create a new PPCG scop from the current scop. 2702 /// 2703 /// The PPCG scop is initialized with data from the current polly::Scop. From 2704 /// this initial data, the data-dependences in the PPCG scop are initialized. 2705 /// We do not use Polly's dependence analysis for now, to ensure we match 2706 /// the PPCG default behaviour more closely. 2707 /// 2708 /// @returns A new ppcg scop. 2709 ppcg_scop *createPPCGScop() { 2710 MustKillsInfo KillsInfo = computeMustKillsInfo(*S); 2711 2712 auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop)); 2713 2714 PPCGScop->options = createPPCGOptions(); 2715 // enable live range reordering 2716 PPCGScop->options->live_range_reordering = 1; 2717 2718 PPCGScop->start = 0; 2719 PPCGScop->end = 0; 2720 2721 PPCGScop->context = S->getContext().release(); 2722 PPCGScop->domain = S->getDomains().release(); 2723 // TODO: investigate this further. PPCG calls collect_call_domains. 2724 PPCGScop->call = isl_union_set_from_set(S->getContext().release()); 2725 PPCGScop->tagged_reads = getTaggedReads(); 2726 PPCGScop->reads = S->getReads().release(); 2727 PPCGScop->live_in = nullptr; 2728 PPCGScop->tagged_may_writes = getTaggedMayWrites(); 2729 PPCGScop->may_writes = S->getWrites().release(); 2730 PPCGScop->tagged_must_writes = getTaggedMustWrites(); 2731 PPCGScop->must_writes = S->getMustWrites().release(); 2732 PPCGScop->live_out = nullptr; 2733 PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.release(); 2734 PPCGScop->must_kills = KillsInfo.MustKills.release(); 2735 2736 PPCGScop->tagger = nullptr; 2737 PPCGScop->independence = 2738 isl_union_map_empty(isl_set_get_space(PPCGScop->context)); 2739 PPCGScop->dep_flow = nullptr; 2740 PPCGScop->tagged_dep_flow = nullptr; 2741 PPCGScop->dep_false = nullptr; 2742 PPCGScop->dep_forced = nullptr; 2743 PPCGScop->dep_order = nullptr; 2744 PPCGScop->tagged_dep_order = nullptr; 2745 2746 PPCGScop->schedule = S->getScheduleTree().release(); 2747 // If we have something non-trivial to kill, add it to the schedule 2748 if (KillsInfo.KillsSchedule.get()) 2749 PPCGScop->schedule = isl_schedule_sequence( 2750 PPCGScop->schedule, KillsInfo.KillsSchedule.release()); 2751 2752 PPCGScop->names = getNames(); 2753 PPCGScop->pet = nullptr; 2754 2755 compute_tagger(PPCGScop); 2756 compute_dependences(PPCGScop); 2757 eliminate_dead_code(PPCGScop); 2758 2759 return PPCGScop; 2760 } 2761 2762 /// Collect the array accesses in a statement. 2763 /// 2764 /// @param Stmt The statement for which to collect the accesses. 2765 /// 2766 /// @returns A list of array accesses. 2767 gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) { 2768 gpu_stmt_access *Accesses = nullptr; 2769 2770 for (MemoryAccess *Acc : Stmt) { 2771 auto Access = 2772 isl_alloc_type(S->getIslCtx().get(), struct gpu_stmt_access); 2773 Access->read = Acc->isRead(); 2774 Access->write = Acc->isWrite(); 2775 Access->access = Acc->getAccessRelation().release(); 2776 isl_space *Space = isl_map_get_space(Access->access); 2777 Space = isl_space_range(Space); 2778 Space = isl_space_from_range(Space); 2779 Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release()); 2780 isl_map *Universe = isl_map_universe(Space); 2781 Access->tagged_access = 2782 isl_map_domain_product(Acc->getAccessRelation().release(), Universe); 2783 Access->exact_write = !Acc->isMayWrite(); 2784 Access->ref_id = Acc->getId().release(); 2785 Access->next = Accesses; 2786 Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions(); 2787 // TODO: Also mark one-element accesses to arrays as fixed-element. 2788 Access->fixed_element = 2789 Acc->isLatestScalarKind() ? isl_bool_true : isl_bool_false; 2790 Accesses = Access; 2791 } 2792 2793 return Accesses; 2794 } 2795 2796 /// Collect the list of GPU statements. 2797 /// 2798 /// Each statement has an id, a pointer to the underlying data structure, 2799 /// as well as a list with all memory accesses. 2800 /// 2801 /// TODO: Initialize the list of memory accesses. 2802 /// 2803 /// @returns A linked-list of statements. 2804 gpu_stmt *getStatements() { 2805 gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx().get(), struct gpu_stmt, 2806 std::distance(S->begin(), S->end())); 2807 2808 int i = 0; 2809 for (auto &Stmt : *S) { 2810 gpu_stmt *GPUStmt = &Stmts[i]; 2811 2812 GPUStmt->id = Stmt.getDomainId().release(); 2813 2814 // We use the pet stmt pointer to keep track of the Polly statements. 2815 GPUStmt->stmt = (pet_stmt *)&Stmt; 2816 GPUStmt->accesses = getStmtAccesses(Stmt); 2817 i++; 2818 } 2819 2820 return Stmts; 2821 } 2822 2823 /// Derive the extent of an array. 2824 /// 2825 /// The extent of an array is the set of elements that are within the 2826 /// accessed array. For the inner dimensions, the extent constraints are 2827 /// 0 and the size of the corresponding array dimension. For the first 2828 /// (outermost) dimension, the extent constraints are the minimal and maximal 2829 /// subscript value for the first dimension. 2830 /// 2831 /// @param Array The array to derive the extent for. 2832 /// 2833 /// @returns An isl_set describing the extent of the array. 2834 isl::set getExtent(ScopArrayInfo *Array) { 2835 unsigned NumDims = Array->getNumberOfDimensions(); 2836 2837 if (Array->getNumberOfDimensions() == 0) 2838 return isl::set::universe(Array->getSpace()); 2839 2840 isl::union_map Accesses = S->getAccesses(Array); 2841 isl::union_set AccessUSet = Accesses.range(); 2842 AccessUSet = AccessUSet.coalesce(); 2843 AccessUSet = AccessUSet.detect_equalities(); 2844 AccessUSet = AccessUSet.coalesce(); 2845 2846 if (AccessUSet.is_empty()) 2847 return isl::set::empty(Array->getSpace()); 2848 2849 isl::set AccessSet = AccessUSet.extract_set(Array->getSpace()); 2850 2851 isl::local_space LS = isl::local_space(Array->getSpace()); 2852 2853 isl::pw_aff Val = isl::aff::var_on_domain(LS, isl::dim::set, 0); 2854 isl::pw_aff OuterMin = AccessSet.dim_min(0); 2855 isl::pw_aff OuterMax = AccessSet.dim_max(0); 2856 OuterMin = OuterMin.add_dims(isl::dim::in, Val.dim(isl::dim::in)); 2857 OuterMax = OuterMax.add_dims(isl::dim::in, Val.dim(isl::dim::in)); 2858 OuterMin = OuterMin.set_tuple_id(isl::dim::in, Array->getBasePtrId()); 2859 OuterMax = OuterMax.set_tuple_id(isl::dim::in, Array->getBasePtrId()); 2860 2861 isl::set Extent = isl::set::universe(Array->getSpace()); 2862 2863 Extent = Extent.intersect(OuterMin.le_set(Val)); 2864 Extent = Extent.intersect(OuterMax.ge_set(Val)); 2865 2866 for (unsigned i = 1; i < NumDims; ++i) 2867 Extent = Extent.lower_bound_si(isl::dim::set, i, 0); 2868 2869 for (unsigned i = 0; i < NumDims; ++i) { 2870 isl::pw_aff PwAff = Array->getDimensionSizePw(i); 2871 2872 // isl_pw_aff can be NULL for zero dimension. Only in the case of a 2873 // Fortran array will we have a legitimate dimension. 2874 if (PwAff.is_null()) { 2875 assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension"); 2876 continue; 2877 } 2878 2879 isl::pw_aff Val = isl::aff::var_on_domain( 2880 isl::local_space(Array->getSpace()), isl::dim::set, i); 2881 PwAff = PwAff.add_dims(isl::dim::in, Val.dim(isl::dim::in)); 2882 PwAff = PwAff.set_tuple_id(isl::dim::in, Val.get_tuple_id(isl::dim::in)); 2883 isl::set Set = PwAff.gt_set(Val); 2884 Extent = Set.intersect(Extent); 2885 } 2886 2887 return Extent; 2888 } 2889 2890 /// Derive the bounds of an array. 2891 /// 2892 /// For the first dimension we derive the bound of the array from the extent 2893 /// of this dimension. For inner dimensions we obtain their size directly from 2894 /// ScopArrayInfo. 2895 /// 2896 /// @param PPCGArray The array to compute bounds for. 2897 /// @param Array The polly array from which to take the information. 2898 void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) { 2899 std::vector<isl_pw_aff *> Bounds; 2900 2901 if (PPCGArray.n_index > 0) { 2902 if (isl_set_is_empty(PPCGArray.extent)) { 2903 isl_set *Dom = isl_set_copy(PPCGArray.extent); 2904 isl_local_space *LS = isl_local_space_from_space( 2905 isl_space_params(isl_set_get_space(Dom))); 2906 isl_set_free(Dom); 2907 isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS)); 2908 Bounds.push_back(Zero); 2909 } else { 2910 isl_set *Dom = isl_set_copy(PPCGArray.extent); 2911 Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1); 2912 isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0); 2913 isl_set_free(Dom); 2914 Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound)); 2915 isl_local_space *LS = 2916 isl_local_space_from_space(isl_set_get_space(Dom)); 2917 isl_aff *One = isl_aff_zero_on_domain(LS); 2918 One = isl_aff_add_constant_si(One, 1); 2919 Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One)); 2920 Bound = isl_pw_aff_gist(Bound, S->getContext().release()); 2921 Bounds.push_back(Bound); 2922 } 2923 } 2924 2925 for (unsigned i = 1; i < PPCGArray.n_index; ++i) { 2926 isl_pw_aff *Bound = Array->getDimensionSizePw(i).release(); 2927 auto LS = isl_pw_aff_get_domain_space(Bound); 2928 auto Aff = isl_multi_aff_zero(LS); 2929 2930 // We need types to work out, which is why we perform this weird dance 2931 // with `Aff` and `Bound`. Consider this example: 2932 2933 // LS: [p] -> { [] } 2934 // Zero: [p] -> { [] } | Implicitly, is [p] -> { ~ -> [] }. 2935 // This `~` is used to denote a "null space" (which is different from 2936 // a *zero dimensional* space), which is something that ISL does not 2937 // show you when pretty printing. 2938 2939 // Bound: [p] -> { [] -> [(10p)] } | Here, the [] is a *zero dimensional* 2940 // space, not a "null space" which does not exist at all. 2941 2942 // When we pullback (precompose) `Bound` with `Zero`, we get: 2943 // Bound . Zero = 2944 // ([p] -> { [] -> [(10p)] }) . ([p] -> {~ -> [] }) = 2945 // [p] -> { ~ -> [(10p)] } = 2946 // [p] -> [(10p)] (as ISL pretty prints it) 2947 // Bound Pullback: [p] -> { [(10p)] } 2948 2949 // We want this kind of an expression for Bound, without a 2950 // zero dimensional input, but with a "null space" input for the types 2951 // to work out later on, as far as I (Siddharth Bhat) understand. 2952 // I was unable to find a reference to this in the ISL manual. 2953 // References: Tobias Grosser. 2954 2955 Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff); 2956 Bounds.push_back(Bound); 2957 } 2958 2959 /// To construct a `isl_multi_pw_aff`, we need all the indivisual `pw_aff` 2960 /// to have the same parameter dimensions. So, we need to align them to an 2961 /// appropriate space. 2962 /// Scop::Context is _not_ an appropriate space, because when we have 2963 /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not 2964 /// contain all parameter dimensions. 2965 /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together. 2966 isl_space *SeedAlignSpace = S->getParamSpace().release(); 2967 SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1); 2968 2969 isl_space *AlignSpace = nullptr; 2970 std::vector<isl_pw_aff *> AlignedBounds; 2971 std::tie(AlignSpace, AlignedBounds) = 2972 alignPwAffs(std::move(Bounds), SeedAlignSpace); 2973 2974 assert(AlignSpace && "alignPwAffs did not initialise AlignSpace"); 2975 2976 isl_pw_aff_list *BoundsList = 2977 createPwAffList(S->getIslCtx().get(), std::move(AlignedBounds)); 2978 2979 isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent); 2980 BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace); 2981 2982 assert(BoundsSpace && "Unable to access space of array."); 2983 assert(BoundsList && "Unable to access list of bounds."); 2984 2985 PPCGArray.bound = 2986 isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList); 2987 assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly."); 2988 } 2989 2990 /// Create the arrays for @p PPCGProg. 2991 /// 2992 /// @param PPCGProg The program to compute the arrays for. 2993 void createArrays(gpu_prog *PPCGProg, 2994 const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) { 2995 int i = 0; 2996 for (auto &Array : ValidSAIs) { 2997 std::string TypeName; 2998 raw_string_ostream OS(TypeName); 2999 3000 OS << *Array->getElementType(); 3001 TypeName = OS.str(); 3002 3003 gpu_array_info &PPCGArray = PPCGProg->array[i]; 3004 3005 PPCGArray.space = Array->getSpace().release(); 3006 PPCGArray.type = strdup(TypeName.c_str()); 3007 PPCGArray.size = DL->getTypeAllocSize(Array->getElementType()); 3008 PPCGArray.name = strdup(Array->getName().c_str()); 3009 PPCGArray.extent = nullptr; 3010 PPCGArray.n_index = Array->getNumberOfDimensions(); 3011 PPCGArray.extent = getExtent(Array).release(); 3012 PPCGArray.n_ref = 0; 3013 PPCGArray.refs = nullptr; 3014 PPCGArray.accessed = true; 3015 PPCGArray.read_only_scalar = 3016 Array->isReadOnly() && Array->getNumberOfDimensions() == 0; 3017 PPCGArray.has_compound_element = false; 3018 PPCGArray.local = false; 3019 PPCGArray.declare_local = false; 3020 PPCGArray.global = false; 3021 PPCGArray.linearize = false; 3022 PPCGArray.dep_order = nullptr; 3023 PPCGArray.user = Array; 3024 3025 PPCGArray.bound = nullptr; 3026 setArrayBounds(PPCGArray, Array); 3027 i++; 3028 3029 collect_references(PPCGProg, &PPCGArray); 3030 PPCGArray.only_fixed_element = only_fixed_element_accessed(&PPCGArray); 3031 } 3032 } 3033 3034 /// Create an identity map between the arrays in the scop. 3035 /// 3036 /// @returns An identity map between the arrays in the scop. 3037 isl_union_map *getArrayIdentity() { 3038 isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release()); 3039 3040 for (auto &Array : S->arrays()) { 3041 isl_space *Space = Array->getSpace().release(); 3042 Space = isl_space_map_from_set(Space); 3043 isl_map *Identity = isl_map_identity(Space); 3044 Maps = isl_union_map_add_map(Maps, Identity); 3045 } 3046 3047 return Maps; 3048 } 3049 3050 /// Create a default-initialized PPCG GPU program. 3051 /// 3052 /// @returns A new gpu program description. 3053 gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) { 3054 3055 if (!PPCGScop) 3056 return nullptr; 3057 3058 auto PPCGProg = isl_calloc_type(S->getIslCtx().get(), struct gpu_prog); 3059 3060 PPCGProg->ctx = S->getIslCtx().get(); 3061 PPCGProg->scop = PPCGScop; 3062 PPCGProg->context = isl_set_copy(PPCGScop->context); 3063 PPCGProg->read = isl_union_map_copy(PPCGScop->reads); 3064 PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes); 3065 PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes); 3066 PPCGProg->tagged_must_kill = 3067 isl_union_map_copy(PPCGScop->tagged_must_kills); 3068 PPCGProg->to_inner = getArrayIdentity(); 3069 PPCGProg->to_outer = getArrayIdentity(); 3070 // TODO: verify that this assignment is correct. 3071 PPCGProg->any_to_outer = nullptr; 3072 PPCGProg->n_stmts = std::distance(S->begin(), S->end()); 3073 PPCGProg->stmts = getStatements(); 3074 3075 // Only consider arrays that have a non-empty extent. 3076 // Otherwise, this will cause us to consider the following kinds of 3077 // empty arrays: 3078 // 1. Invariant loads that are represented by SAI objects. 3079 // 2. Arrays with statically known zero size. 3080 auto ValidSAIsRange = 3081 make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool { 3082 return !getExtent(SAI).is_empty(); 3083 }); 3084 SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(), 3085 ValidSAIsRange.end()); 3086 3087 PPCGProg->n_array = 3088 ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end()); 3089 PPCGProg->array = isl_calloc_array( 3090 S->getIslCtx().get(), struct gpu_array_info, PPCGProg->n_array); 3091 3092 createArrays(PPCGProg, ValidSAIs); 3093 3094 PPCGProg->array_order = nullptr; 3095 collect_order_dependences(PPCGProg); 3096 3097 PPCGProg->may_persist = compute_may_persist(PPCGProg); 3098 return PPCGProg; 3099 } 3100 3101 struct PrintGPUUserData { 3102 struct cuda_info *CudaInfo; 3103 struct gpu_prog *PPCGProg; 3104 std::vector<ppcg_kernel *> Kernels; 3105 }; 3106 3107 /// Print a user statement node in the host code. 3108 /// 3109 /// We use ppcg's printing facilities to print the actual statement and 3110 /// additionally build up a list of all kernels that are encountered in the 3111 /// host ast. 3112 /// 3113 /// @param P The printer to print to 3114 /// @param Options The printing options to use 3115 /// @param Node The node to print 3116 /// @param User A user pointer to carry additional data. This pointer is 3117 /// expected to be of type PrintGPUUserData. 3118 /// 3119 /// @returns A printer to which the output has been printed. 3120 static __isl_give isl_printer * 3121 printHostUser(__isl_take isl_printer *P, 3122 __isl_take isl_ast_print_options *Options, 3123 __isl_take isl_ast_node *Node, void *User) { 3124 auto Data = (struct PrintGPUUserData *)User; 3125 auto Id = isl_ast_node_get_annotation(Node); 3126 3127 if (Id) { 3128 bool IsUser = !strcmp(isl_id_get_name(Id), "user"); 3129 3130 // If this is a user statement, format it ourselves as ppcg would 3131 // otherwise try to call pet functionality that is not available in 3132 // Polly. 3133 if (IsUser) { 3134 P = isl_printer_start_line(P); 3135 P = isl_printer_print_ast_node(P, Node); 3136 P = isl_printer_end_line(P); 3137 isl_id_free(Id); 3138 isl_ast_print_options_free(Options); 3139 return P; 3140 } 3141 3142 auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id); 3143 isl_id_free(Id); 3144 Data->Kernels.push_back(Kernel); 3145 } 3146 3147 return print_host_user(P, Options, Node, User); 3148 } 3149 3150 /// Print C code corresponding to the control flow in @p Kernel. 3151 /// 3152 /// @param Kernel The kernel to print 3153 void printKernel(ppcg_kernel *Kernel) { 3154 auto *P = isl_printer_to_str(S->getIslCtx().get()); 3155 P = isl_printer_set_output_format(P, ISL_FORMAT_C); 3156 auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get()); 3157 P = isl_ast_node_print(Kernel->tree, P, Options); 3158 char *String = isl_printer_get_str(P); 3159 outs() << String << "\n"; 3160 free(String); 3161 isl_printer_free(P); 3162 } 3163 3164 /// Print C code corresponding to the GPU code described by @p Tree. 3165 /// 3166 /// @param Tree An AST describing GPU code 3167 /// @param PPCGProg The PPCG program from which @Tree has been constructed. 3168 void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) { 3169 auto *P = isl_printer_to_str(S->getIslCtx().get()); 3170 P = isl_printer_set_output_format(P, ISL_FORMAT_C); 3171 3172 PrintGPUUserData Data; 3173 Data.PPCGProg = PPCGProg; 3174 3175 auto *Options = isl_ast_print_options_alloc(S->getIslCtx().get()); 3176 Options = 3177 isl_ast_print_options_set_print_user(Options, printHostUser, &Data); 3178 P = isl_ast_node_print(Tree, P, Options); 3179 char *String = isl_printer_get_str(P); 3180 outs() << "# host\n"; 3181 outs() << String << "\n"; 3182 free(String); 3183 isl_printer_free(P); 3184 3185 for (auto Kernel : Data.Kernels) { 3186 outs() << "# kernel" << Kernel->id << "\n"; 3187 printKernel(Kernel); 3188 } 3189 } 3190 3191 // Generate a GPU program using PPCG. 3192 // 3193 // GPU mapping consists of multiple steps: 3194 // 3195 // 1) Compute new schedule for the program. 3196 // 2) Map schedule to GPU (TODO) 3197 // 3) Generate code for new schedule (TODO) 3198 // 3199 // We do not use here the Polly ScheduleOptimizer, as the schedule optimizer 3200 // is mostly CPU specific. Instead, we use PPCG's GPU code generation 3201 // strategy directly from this pass. 3202 gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) { 3203 3204 auto PPCGGen = isl_calloc_type(S->getIslCtx().get(), struct gpu_gen); 3205 3206 PPCGGen->ctx = S->getIslCtx().get(); 3207 PPCGGen->options = PPCGScop->options; 3208 PPCGGen->print = nullptr; 3209 PPCGGen->print_user = nullptr; 3210 PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt; 3211 PPCGGen->prog = PPCGProg; 3212 PPCGGen->tree = nullptr; 3213 PPCGGen->types.n = 0; 3214 PPCGGen->types.name = nullptr; 3215 PPCGGen->sizes = nullptr; 3216 PPCGGen->used_sizes = nullptr; 3217 PPCGGen->kernel_id = 0; 3218 3219 // Set scheduling strategy to same strategy PPCG is using. 3220 isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true); 3221 isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true); 3222 isl_options_set_schedule_whole_component(PPCGGen->ctx, false); 3223 3224 isl_schedule *Schedule = get_schedule(PPCGGen); 3225 3226 int has_permutable = has_any_permutable_node(Schedule); 3227 3228 Schedule = 3229 isl_schedule_align_params(Schedule, S->getFullParamSpace().release()); 3230 3231 if (!has_permutable || has_permutable < 0) { 3232 Schedule = isl_schedule_free(Schedule); 3233 LLVM_DEBUG(dbgs() << getUniqueScopName(S) 3234 << " does not have permutable bands. Bailing out\n";); 3235 } else { 3236 const bool CreateTransferToFromDevice = !PollyManagedMemory; 3237 Schedule = map_to_device(PPCGGen, Schedule, CreateTransferToFromDevice); 3238 PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule)); 3239 } 3240 3241 if (DumpSchedule) { 3242 isl_printer *P = isl_printer_to_str(S->getIslCtx().get()); 3243 P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK); 3244 P = isl_printer_print_str(P, "Schedule\n"); 3245 P = isl_printer_print_str(P, "========\n"); 3246 if (Schedule) 3247 P = isl_printer_print_schedule(P, Schedule); 3248 else 3249 P = isl_printer_print_str(P, "No schedule found\n"); 3250 3251 outs() << isl_printer_get_str(P) << "\n"; 3252 isl_printer_free(P); 3253 } 3254 3255 if (DumpCode) { 3256 outs() << "Code\n"; 3257 outs() << "====\n"; 3258 if (PPCGGen->tree) 3259 printGPUTree(PPCGGen->tree, PPCGProg); 3260 else 3261 outs() << "No code generated\n"; 3262 } 3263 3264 isl_schedule_free(Schedule); 3265 3266 return PPCGGen; 3267 } 3268 3269 /// Free gpu_gen structure. 3270 /// 3271 /// @param PPCGGen The ppcg_gen object to free. 3272 void freePPCGGen(gpu_gen *PPCGGen) { 3273 isl_ast_node_free(PPCGGen->tree); 3274 isl_union_map_free(PPCGGen->sizes); 3275 isl_union_map_free(PPCGGen->used_sizes); 3276 free(PPCGGen); 3277 } 3278 3279 /// Free the options in the ppcg scop structure. 3280 /// 3281 /// ppcg is not freeing these options for us. To avoid leaks we do this 3282 /// ourselves. 3283 /// 3284 /// @param PPCGScop The scop referencing the options to free. 3285 void freeOptions(ppcg_scop *PPCGScop) { 3286 free(PPCGScop->options->debug); 3287 PPCGScop->options->debug = nullptr; 3288 free(PPCGScop->options); 3289 PPCGScop->options = nullptr; 3290 } 3291 3292 /// Approximate the number of points in the set. 3293 /// 3294 /// This function returns an ast expression that overapproximates the number 3295 /// of points in an isl set through the rectangular hull surrounding this set. 3296 /// 3297 /// @param Set The set to count. 3298 /// @param Build The isl ast build object to use for creating the ast 3299 /// expression. 3300 /// 3301 /// @returns An approximation of the number of points in the set. 3302 __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set, 3303 __isl_keep isl_ast_build *Build) { 3304 3305 isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1); 3306 auto *Expr = isl_ast_expr_from_val(isl_val_copy(One)); 3307 3308 isl_space *Space = isl_set_get_space(Set); 3309 Space = isl_space_params(Space); 3310 auto *Univ = isl_set_universe(Space); 3311 isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One); 3312 3313 for (long i = 0, n = isl_set_dim(Set, isl_dim_set); i < n; i++) { 3314 isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i); 3315 isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i); 3316 isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min); 3317 DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff)); 3318 auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize); 3319 Expr = isl_ast_expr_mul(Expr, DimSizeExpr); 3320 } 3321 3322 isl_set_free(Set); 3323 isl_pw_aff_free(OneAff); 3324 3325 return Expr; 3326 } 3327 3328 /// Approximate a number of dynamic instructions executed by a given 3329 /// statement. 3330 /// 3331 /// @param Stmt The statement for which to compute the number of dynamic 3332 /// instructions. 3333 /// @param Build The isl ast build object to use for creating the ast 3334 /// expression. 3335 /// @returns An approximation of the number of dynamic instructions executed 3336 /// by @p Stmt. 3337 __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt, 3338 __isl_keep isl_ast_build *Build) { 3339 auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build); 3340 3341 long InstCount = 0; 3342 3343 if (Stmt.isBlockStmt()) { 3344 auto *BB = Stmt.getBasicBlock(); 3345 InstCount = std::distance(BB->begin(), BB->end()); 3346 } else { 3347 auto *R = Stmt.getRegion(); 3348 3349 for (auto *BB : R->blocks()) { 3350 InstCount += std::distance(BB->begin(), BB->end()); 3351 } 3352 } 3353 3354 isl_val *InstVal = isl_val_int_from_si(S->getIslCtx().get(), InstCount); 3355 auto *InstExpr = isl_ast_expr_from_val(InstVal); 3356 return isl_ast_expr_mul(InstExpr, Iterations); 3357 } 3358 3359 /// Approximate dynamic instructions executed in scop. 3360 /// 3361 /// @param S The scop for which to approximate dynamic instructions. 3362 /// @param Build The isl ast build object to use for creating the ast 3363 /// expression. 3364 /// @returns An approximation of the number of dynamic instructions executed 3365 /// in @p S. 3366 __isl_give isl_ast_expr * 3367 getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) { 3368 isl_ast_expr *Instructions; 3369 3370 isl_val *Zero = isl_val_int_from_si(S.getIslCtx().get(), 0); 3371 Instructions = isl_ast_expr_from_val(Zero); 3372 3373 for (ScopStmt &Stmt : S) { 3374 isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build); 3375 Instructions = isl_ast_expr_add(Instructions, StmtInstructions); 3376 } 3377 return Instructions; 3378 } 3379 3380 /// Create a check that ensures sufficient compute in scop. 3381 /// 3382 /// @param S The scop for which to ensure sufficient compute. 3383 /// @param Build The isl ast build object to use for creating the ast 3384 /// expression. 3385 /// @returns An expression that evaluates to TRUE in case of sufficient 3386 /// compute and to FALSE, otherwise. 3387 __isl_give isl_ast_expr * 3388 createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) { 3389 auto Iterations = getNumberOfIterations(S, Build); 3390 auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx().get(), MinCompute); 3391 auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal); 3392 return isl_ast_expr_ge(Iterations, MinComputeExpr); 3393 } 3394 3395 /// Check if the basic block contains a function we cannot codegen for GPU 3396 /// kernels. 3397 /// 3398 /// If this basic block does something with a `Function` other than calling 3399 /// a function that we support in a kernel, return true. 3400 bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB, 3401 bool AllowCUDALibDevice) { 3402 for (const Instruction &Inst : *BB) { 3403 const CallInst *Call = dyn_cast<CallInst>(&Inst); 3404 if (Call && isValidFunctionInKernel(Call->getCalledFunction(), 3405 AllowCUDALibDevice)) 3406 continue; 3407 3408 for (Value *Op : Inst.operands()) 3409 // Look for (<func-type>*) among operands of Inst 3410 if (auto PtrTy = dyn_cast<PointerType>(Op->getType())) { 3411 if (isa<FunctionType>(PtrTy->getElementType())) { 3412 LLVM_DEBUG(dbgs() 3413 << Inst << " has illegal use of function in kernel.\n"); 3414 return true; 3415 } 3416 } 3417 } 3418 return false; 3419 } 3420 3421 /// Return whether the Scop S uses functions in a way that we do not support. 3422 bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) { 3423 for (auto &Stmt : S) { 3424 if (Stmt.isBlockStmt()) { 3425 if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(), 3426 AllowCUDALibDevice)) 3427 return true; 3428 } else { 3429 assert(Stmt.isRegionStmt() && 3430 "Stmt was neither block nor region statement"); 3431 for (const BasicBlock *BB : Stmt.getRegion()->blocks()) 3432 if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice)) 3433 return true; 3434 } 3435 } 3436 return false; 3437 } 3438 3439 /// Generate code for a given GPU AST described by @p Root. 3440 /// 3441 /// @param Root An isl_ast_node pointing to the root of the GPU AST. 3442 /// @param Prog The GPU Program to generate code for. 3443 void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) { 3444 ScopAnnotator Annotator; 3445 Annotator.buildAliasScopes(*S); 3446 3447 Region *R = &S->getRegion(); 3448 3449 simplifyRegion(R, DT, LI, RI); 3450 3451 BasicBlock *EnteringBB = R->getEnteringBlock(); 3452 3453 PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator); 3454 3455 // Only build the run-time condition and parameters _after_ having 3456 // introduced the conditional branch. This is important as the conditional 3457 // branch will guard the original scop from new induction variables that 3458 // the SCEVExpander may introduce while code generating the parameters and 3459 // which may introduce scalar dependences that prevent us from correctly 3460 // code generating this scop. 3461 BBPair StartExitBlocks; 3462 BranchInst *CondBr = nullptr; 3463 std::tie(StartExitBlocks, CondBr) = 3464 executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI); 3465 BasicBlock *StartBlock = std::get<0>(StartExitBlocks); 3466 3467 assert(CondBr && "CondBr not initialized by executeScopConditionally"); 3468 3469 GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S, 3470 StartBlock, Prog, Runtime, Architecture); 3471 3472 // TODO: Handle LICM 3473 auto SplitBlock = StartBlock->getSinglePredecessor(); 3474 Builder.SetInsertPoint(SplitBlock->getTerminator()); 3475 3476 isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx().get()); 3477 isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build); 3478 isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build); 3479 Condition = isl_ast_expr_and(Condition, SufficientCompute); 3480 isl_ast_build_free(Build); 3481 3482 // preload invariant loads. Note: This should happen before the RTC 3483 // because the RTC may depend on values that are invariant load hoisted. 3484 if (!NodeBuilder.preloadInvariantLoads()) { 3485 // Patch the introduced branch condition to ensure that we always execute 3486 // the original SCoP. 3487 auto *FalseI1 = Builder.getFalse(); 3488 auto *SplitBBTerm = Builder.GetInsertBlock()->getTerminator(); 3489 SplitBBTerm->setOperand(0, FalseI1); 3490 3491 LLVM_DEBUG(dbgs() << "preloading invariant loads failed in function: " + 3492 S->getFunction().getName() + 3493 " | Scop Region: " + S->getNameStr()); 3494 // adjust the dominator tree accordingly. 3495 auto *ExitingBlock = StartBlock->getUniqueSuccessor(); 3496 assert(ExitingBlock); 3497 auto *MergeBlock = ExitingBlock->getUniqueSuccessor(); 3498 assert(MergeBlock); 3499 polly::markBlockUnreachable(*StartBlock, Builder); 3500 polly::markBlockUnreachable(*ExitingBlock, Builder); 3501 auto *ExitingBB = S->getExitingBlock(); 3502 assert(ExitingBB); 3503 3504 DT->changeImmediateDominator(MergeBlock, ExitingBB); 3505 DT->eraseNode(ExitingBlock); 3506 isl_ast_expr_free(Condition); 3507 isl_ast_node_free(Root); 3508 } else { 3509 3510 if (polly::PerfMonitoring) { 3511 PerfMonitor P(*S, EnteringBB->getParent()->getParent()); 3512 P.initialize(); 3513 P.insertRegionStart(SplitBlock->getTerminator()); 3514 3515 // TODO: actually think if this is the correct exiting block to place 3516 // the `end` performance marker. Invariant load hoisting changes 3517 // the CFG in a way that I do not precisely understand, so I 3518 // (Siddharth<[email protected]>) should come back to this and 3519 // think about which exiting block to use. 3520 auto *ExitingBlock = StartBlock->getUniqueSuccessor(); 3521 assert(ExitingBlock); 3522 BasicBlock *MergeBlock = ExitingBlock->getUniqueSuccessor(); 3523 P.insertRegionEnd(MergeBlock->getTerminator()); 3524 } 3525 3526 NodeBuilder.addParameters(S->getContext().release()); 3527 Value *RTC = NodeBuilder.createRTC(Condition); 3528 Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC); 3529 3530 Builder.SetInsertPoint(&*StartBlock->begin()); 3531 3532 NodeBuilder.create(Root); 3533 } 3534 3535 /// In case a sequential kernel has more surrounding loops as any parallel 3536 /// kernel, the SCoP is probably mostly sequential. Hence, there is no 3537 /// point in running it on a GPU. 3538 if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel) 3539 CondBr->setOperand(0, Builder.getFalse()); 3540 3541 if (!NodeBuilder.BuildSuccessful) 3542 CondBr->setOperand(0, Builder.getFalse()); 3543 } 3544 3545 bool runOnScop(Scop &CurrentScop) override { 3546 S = &CurrentScop; 3547 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 3548 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 3549 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 3550 DL = &S->getRegion().getEntry()->getModule()->getDataLayout(); 3551 RI = &getAnalysis<RegionInfoPass>().getRegionInfo(); 3552 3553 LLVM_DEBUG(dbgs() << "PPCGCodeGen running on : " << getUniqueScopName(S) 3554 << " | loop depth: " << S->getMaxLoopDepth() << "\n"); 3555 3556 // We currently do not support functions other than intrinsics inside 3557 // kernels, as code generation will need to offload function calls to the 3558 // kernel. This may lead to a kernel trying to call a function on the host. 3559 // This also allows us to prevent codegen from trying to take the 3560 // address of an intrinsic function to send to the kernel. 3561 if (containsInvalidKernelFunction(CurrentScop, 3562 Architecture == GPUArch::NVPTX64)) { 3563 LLVM_DEBUG( 3564 dbgs() << getUniqueScopName(S) 3565 << " contains function which cannot be materialised in a GPU " 3566 "kernel. Bailing out.\n";); 3567 return false; 3568 } 3569 3570 auto PPCGScop = createPPCGScop(); 3571 auto PPCGProg = createPPCGProg(PPCGScop); 3572 auto PPCGGen = generateGPU(PPCGScop, PPCGProg); 3573 3574 if (PPCGGen->tree) { 3575 generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg); 3576 CurrentScop.markAsToBeSkipped(); 3577 } else { 3578 LLVM_DEBUG(dbgs() << getUniqueScopName(S) 3579 << " has empty PPCGGen->tree. Bailing out.\n"); 3580 } 3581 3582 freeOptions(PPCGScop); 3583 freePPCGGen(PPCGGen); 3584 gpu_prog_free(PPCGProg); 3585 ppcg_scop_free(PPCGScop); 3586 3587 return true; 3588 } 3589 3590 void printScop(raw_ostream &, Scop &) const override {} 3591 3592 void getAnalysisUsage(AnalysisUsage &AU) const override { 3593 ScopPass::getAnalysisUsage(AU); 3594 3595 AU.addRequired<DominatorTreeWrapperPass>(); 3596 AU.addRequired<RegionInfoPass>(); 3597 AU.addRequired<ScalarEvolutionWrapperPass>(); 3598 AU.addRequired<ScopDetectionWrapperPass>(); 3599 AU.addRequired<ScopInfoRegionPass>(); 3600 AU.addRequired<LoopInfoWrapperPass>(); 3601 3602 // FIXME: We do not yet add regions for the newly generated code to the 3603 // region tree. 3604 } 3605 }; 3606 } // namespace 3607 3608 char PPCGCodeGeneration::ID = 1; 3609 3610 Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) { 3611 PPCGCodeGeneration *generator = new PPCGCodeGeneration(); 3612 generator->Runtime = Runtime; 3613 generator->Architecture = Arch; 3614 return generator; 3615 } 3616 3617 INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg", 3618 "Polly - Apply PPCG translation to SCOP", false, false) 3619 INITIALIZE_PASS_DEPENDENCY(DependenceInfo); 3620 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass); 3621 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass); 3622 INITIALIZE_PASS_DEPENDENCY(RegionInfoPass); 3623 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass); 3624 INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass); 3625 INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg", 3626 "Polly - Apply PPCG translation to SCOP", false, false) 3627