//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/PerfMonitor.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#include "isl/union_map.h"

extern "C" {
#include "ppcg/cuda.h"
#include "ppcg/gpu.h"
#include "ppcg/gpu_print.h"
#include "ppcg/ppcg.h"
#include "ppcg/schedule.h"
}

#include "llvm/Support/Debug.h"

using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));
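// The dump flags above are typically combined on an `opt` invocation that
// runs this pass. A sketch of such an invocation (illustrative only; the
// exact plugin loading and pipeline flags depend on the LLVM/Polly build):
//
//   opt -load LLVMPolly.so -polly-codegen-ppcg \
//       -polly-acc-dump-schedule -polly-acc-dump-kernel-ir input.ll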
bool polly::PollyManagedMemory;
static cl::opt<bool, true>
    XManagedMemory("polly-acc-codegen-managed-memory",
                   cl::desc("Generate Host kernel code assuming"
                            " that all memory has been"
                            " declared as managed memory"),
                   cl::location(PollyManagedMemory), cl::Hidden,
                   cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU"
                                       " kernel module."),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));

static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));

extern bool polly::PerfMonitoring;

/// Return a unique name for a Scop, which is the scop region with the
/// function name.
std::string getUniqueScopName(const Scop *S) {
  return "Scop Region: " + S->getNameStr() +
         " | Function: " + std::string(S->getFunction().getName());
}

/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`,
  /// which merges schedules in *arbitrary* order.
  /// (We do not care about the order of the kills anyway.)
  isl::schedule KillsSchedule;
  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///  1. PHI nodes. PHI nodes are not alive outside the scop and can
  ///     consequently all be killed.
  ///  2. Scalar arrays that are not used outside the Scop. This is
  ///     checked by `isScalarUsesContainedInScop`.
  /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule(nullptr) {}
};
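// For illustration, assume a scop with one killable scalar backed by the
// array `MemRef_tmp` and a parameter p (names are hypothetical). The maps
// built in computeMustKillsInfo below would then look like:
//
//   TaggedMustKills:
//     [p] -> { [SKill_phantom_MemRef_tmp[] -> ref_phantomMemRef_tmp[]]
//              -> MemRef_tmp[] }
//   MustKills:
//     [p] -> { SKill_phantom_MemRef_tmp[] -> MemRef_tmp[] }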
/// Check if SAI's uses are entirely contained within Scop S.
/// If a scalar is used only within the Scop, we are free to kill it, as no
/// data can flow in/out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays requires alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    if (!R.contains(I))
      return false;
  }
  return true;
}

/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @param S The Scop to compute live range reordering information for.
/// @returns live range reordering information that can be used to setup
/// PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace = S.getParamSpace();
  MustKillsInfo Info;

  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop.
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(ParamSpace);
  Info.MustKills = isl::union_map::empty(ParamSpace);

  // Initialising KillsSchedule to `isl_set_empty` creates an empty node in
  // the schedule:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = nullptr;

  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);

    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //      [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    //    To construct this, we use `isl_map_domain_product` on 2 maps:
    //    2a. StmtToScalar:
    //        [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    //    2b. PhantomRefToScalar:
    //        [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    //    Combining these with `isl_map_domain_product` gives us
    //    TaggedMustKill:
    //        [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(ParamSpace);
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));

    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();

    // 3. Create the kill schedule of the form:
    //        "[param] -> { Stmt_phantom[] }"
    //    Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (Info.KillsSchedule)
      Info.KillsSchedule = Info.KillsSchedule.set(KillSchedule);
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}
/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;

  isl::ast_build Build = isl::manage(isl_ast_build_copy(Build_C));
  isl::ctx Ctx = Build.get_ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  Stmt->setAstBuild(Build);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}

/// Given an LLVM Type, compute its size in bytes.
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}
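// For example (illustrative): a double is a primitive type of 64 bits, so
// computeSizeInBytes yields 64 / 8 == 8; an i32 yields 4. For a type whose
// primitive size is reported as 0, the scalar size of its element type is
// used as a fallback.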
/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  virtual void finalize();

  /// Track if the full build process was successful.
  ///
  /// This value is set to false, if throughout the build process an error
  /// occurred which prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was
  /// created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed
  /// any more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel.
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  class IslIdDeleter {
  public:
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;

  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  virtual void createUser(__isl_take isl_ast_node *UserStmt);

  virtual void createFor(__isl_take isl_ast_node *Node);

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);

  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values.
  ///
  /// @returns A tuple, whose:
  ///          - First element contains the set of values referenced by the
  ///            kernel.
  ///          - Second element contains the set of functions referenced by
  ///            the kernel. All functions in the set satisfy
  ///            `isValidFunctionInKernel`.
  ///          - Third element contains loops that have induction variables
  ///            which are used in the kernel, *and* these loops are *neither*
  ///            in the scop, nor do they immediately surround the Scop.
  ///            See [Code generation of induction variables of loops outside
  ///            Scops]
  ///          - Fourth element contains the isl parameter space of the
  ///            kernel.
  std::tuple<SetVector<Value *>, SetVector<Function *>,
             SetVector<const Loop *>, isl::space>
  getReferencesInKernel(ppcg_kernel *Kernel);

  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for X and Y dimension.
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);
  /// Get the managed array pointer for sending host pointers to the device.
  /// \note
  /// This is to be used only with managed memory.
  Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);

  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variable.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);

  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);
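  // The annotations emitted for a kernel @k with block sizes (X, Y, Z) take
  // roughly the following form in LLVM IR (illustrative sketch; see the
  // implementation of addCUDAAnnotations further below):
  //
  //   !nvvm.annotations = !{!0}
  //   !0 = !{void ()* @k, !"maxntidx", i32 X, !"maxntidy", i32 Y,
  //          !"maxntidz", i32 Z}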
  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is
  /// accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);

  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can
  /// serve as target for device code generation. Set the Builder to point to
  /// the start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this
  ///                         kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration
  ///                      for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);
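  // For a scop in a function `f` that references one array A, one host
  // iterator c0, and one parameter n, the resulting declaration might look
  // as follows (purely illustrative; argument names, order, and types follow
  // the list above and the code in createKernelFunctionDecl):
  //
  //   define ptx_kernel void @FUNC_f_SCOP_0_KERNEL_0(i8* %MemRef_A,
  //                                                  i64 %c0, i64 %n)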
  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param Kernel The kernel to generate the function calls for.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  ///    SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  ///    old functions (that come from the original module) to new functions
  ///    (that are created within GPUModule). That way, we generate references
  ///    to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);

  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested
  /// -- dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();

  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Create code to prepare the managed device pointers.
  void prepareManagedDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation.
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate.
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  /// \note
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object.
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);

  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param BlockDimX  The size of the first block dimension.
  /// @param BlockDimY  The size of the second block dimension.
  /// @param BlockDimZ  The size of the third block dimension.
  /// @param Parameters A pointer to an array of pointers to the parameter
  ///                   values that are passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};

std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}
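// For example (illustrative): for kernel id 0 in a scop of a function `gemm`,
// getKernelFuncName produces a name of the form
// "FUNC_gemm_SCOP_<scop-id>_KERNEL_0", where <scop-id> is the unique id
// returned by S.getID().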
void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!PollyManagedMemory)
    allocateDeviceArrays();
  else
    prepareManagedDeviceArrays();
}

void GPUNodeBuilder::finalize() {
  if (!PollyManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}

void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!PollyManagedMemory &&
         "Managed memory will directly send host pointers "
         "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
    // It makes no sense to have an array of size 0. The CUDA API will
    // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
    // choose to be defensive and catch this at the compile phase. It is
    // most likely that we are doing something wrong with size computation.
    if (SizeSCEV->isZero()) {
      errs() << getUniqueScopName(&S)
             << " has computed array size 0: " << *ArraySize
             << " | for array: " << *(ScopArray->getBasePtr())
             << ". This is illegal, exiting.\n";
      report_fatal_error("array size was computed to be 0");
    }

    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}

void GPUNodeBuilder::prepareManagedDeviceArrays() {
  assert(PollyManagedMemory &&
         "Device arrays must only be prepared in managed-memory mode");
  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
    else
      HostPtr = ScopArray->getBasePtr();
    HostPtr = getLatestValue(HostPtr);

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ScopArray->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ScopArray] = HostPtr;
  }
}

void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (auto &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };
    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}

void GPUNodeBuilder::freeDeviceArrays() {
  assert(!PollyManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}

Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}
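// The declaration created above corresponds to the following LLVM IR
// (illustrative; the same declare-on-demand pattern is used for all of the
// polly_* GPU runtime calls below, with the respective argument types):
//
//   declare i8* @polly_getKernel(i8*, i8*)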
Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}

void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}

void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}

void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}

Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}

void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}

void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}

void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(PollyManagedMemory && "explicit synchronization is only necessary "
                               "for managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}

Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}

void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}

/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}

Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build = isl::ast_build::from_context(S.getContext());
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound =
        isl::manage(isl_multi_pw_aff_copy(Array->bound));

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }
  return ArraySize;
}

Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
  if (gpu_array_is_scalar(Array))
    return nullptr;

  isl::ast_build Build = isl::ast_build::from_context(S.getContext());

  isl::set Min = isl::manage(isl_set_copy(Array->extent)).lexmin();

  isl::set ZeroSet = isl::set::universe(Min.get_space());

  for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++)
    ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0);

  if (Min.is_subset(ZeroSet)) {
    return nullptr;
  }

  isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.get_ctx(), 0));

  for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++) {
    if (i > 0) {
      isl::pw_aff Bound_I =
          isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1));
      isl::ast_expr BExpr = Build.expr_from(Bound_I);
      Result = Result.mul(BExpr);
    }
    isl::pw_aff DimMin = Min.dim_min(i);
    isl::ast_expr MExpr = Build.expr_from(DimMin);
    Result = Result.add(MExpr);
  }

  return ExprBuilder.create(Result.release());
}
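// A minimal 1-D sketch of the computation above: for an extent
// { [i] : 42 <= i <= 141 }, the lexicographic minimum is [42], which is not
// contained in ZeroSet, so the returned Value is the element offset 42
// (matching the example in the declaration of getArrayOffset).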
Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
                                             ScopArrayInfo *ArrayInfo) {
  assert(PollyManagedMemory && "Only used when you wish to get a host "
                               "pointer for sending data to the kernel, "
                               "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  it = DeviceAllocations.find(ArrayInfo);
  assert(it != DeviceAllocations.end() &&
         "Device array expected to be available");
  return it->second;
}

void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!PollyManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();
  HostPtr = getLatestValue(HostPtr);

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}

void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  // Read the name before dropping our reference to Id; the AST node keeps
  // the id (and with it the name) alive.
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    if (PollyManagedMemory)
      createCallSynchronizeDevice();
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!PollyManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!PollyManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
  return;
}
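// The node names dispatched on above are generated by ppcg. For a scop
// accessing an array A, the AST typically contains user nodes named like
// (illustrative): "init_device", "to_device_MemRef_A", "kernel0",
// "from_device_MemRef_A", and "clear_device".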
void GPUNodeBuilder::createFor(__isl_take isl_ast_node *Node) {
  createForSequential(Node, false);
}

void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}

void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
                                    ppcg_kernel_stmt *KernelStmt) {
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;

  LoopToScevMapT LTS;
  LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());

  createSubstitutions(Expr, Stmt, LTS);

  if (Stmt->isBlockStmt())
    BlockGen.copyStmt(*Stmt, LTS, Indexes);
  else
    RegionGen.copyStmt(*Stmt, LTS, Indexes);
}

void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  const char *SpirName = "__gen_ocl_barrier_global";

  Function *Sync;

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    Sync = M->getFunction(SpirName);

    // If Sync is not available, declare it.
    if (!Sync) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
      Sync = Function::Create(Ty, Linkage, SpirName, M);
      Sync->setCallingConv(CallingConv::SPIR_FUNC);
    }
    break;
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}
/// Collect llvm::Values referenced from @p Node.
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}

/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp",      "expf",      "expl",      "cos", "cosf", "sqrt", "sqrtf",
    "copysign", "copysignf", "copysignl", "log", "logf", "powi", "powif"};

// A map from intrinsics to their corresponding libdevice functions.
const std::map<std::string, std::string> IntrinsicToLibdeviceFunc = {
    {"llvm.exp.f64", "exp"},
    {"llvm.exp.f32", "expf"},
    {"llvm.powi.f64", "powi"},
    {"llvm.powi.f32", "powif"}};

/// Return the corresponding CUDA libdevice function name for @p F.
/// Note that this function will try to convert intrinsics in the list
/// IntrinsicToLibdeviceFunc into libdevice functions.
/// This is because some intrinsics such as `exp`
/// are not supported by the NVPTX backend.
/// If this restriction of the backend is lifted, we should refactor our code
/// so that we use intrinsics whenever possible.
///
/// Return "" if we are not compiling for CUDA.
std::string getCUDALibDeviceFuntion(Function *F) {
  const std::string FnName = [&] {
    auto It = IntrinsicToLibdeviceFunc.find(F->getName());
    if (It != IntrinsicToLibdeviceFunc.end())
      return It->second;

    return std::string(F->getName());
  }();

  if (CUDALibDeviceFunctions.count(FnName))
    return "__nv_" + FnName;

  return "";
}
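// For example (following the tables above): a call to the intrinsic
// "llvm.exp.f64" is first mapped to "exp" and, because "exp" appears in
// CUDALibDeviceFunctions, resolved to "__nv_exp". Any function name not in
// the list yields the empty string.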
/// Check if F is a function that we can code-generate in a GPU kernel.
static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
  assert(F && "F is an invalid pointer");
  // We string compare against the name of the function to allow
  // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
  // "llvm.copysign".
  const StringRef Name = F->getName();

  if (AllowLibDevice && getCUDALibDeviceFuntion(F).length() > 0)
    return true;

  return F->isIntrinsic() &&
         (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
          Name.startswith("llvm.copysign"));
}

/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking an address of any function and
/// trying to pass along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }

/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
                                 bool AllowCUDALibDevice) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
             "Code should have bailed out by "
             "this point if an invalid function "
             "were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}
std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
           isl::space>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params();
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(),
      &ParamSpace};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  // NOTE: this is populated in IslNodeBuilder::addParameters
  // See [Code generation of induction variables of loops outside Scops].
  for (const auto &I : OutsideLoopIterations)
    SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs) {
    findValues(Expr, SE, SubtreeValues);
    findLoops(Expr, Loops);
  }

  Loops.remove_if([this](const Loop *L) {
    return S.contains(L) || L->contains(S.getEntry());
  });

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace().release();
  for (long i = 0, n = isl_space_dim(Space, isl_dim_param); i < n; i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  isl_space_free(Space);

  for (long i = 0, n = isl_space_dim(Kernel->space, isl_dim_set); i < n; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // ValidSubtree{Values, Functions} sets. Nor should the sets
  // ValidSubtree{Values, Functions} have any common element.
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());

  bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;

  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));

  // @see IslNodeBuilder::getReferencesInSubtree
  SetVector<Value *> ReplacedValues;
  for (Value *V : ValidSubtreeValues) {
    auto It = ValueMap.find(V);
    if (It == ValueMap.end())
      ReplacedValues.insert(V);
    else
      ReplacedValues.insert(It->second);
  }
  return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops,
                         ParamSpace);
}

void GPUNodeBuilder::clearDominators(Function *F) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (BasicBlock *BB : Nodes)
    DT.eraseNode(BB);
}

void GPUNodeBuilder::clearScalarEvolution(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
  }
}

void GPUNodeBuilder::clearLoops(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
    LI.removeBlock(&BB);
  }
}

std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;
  isl::ast_build Context = isl::ast_build::from_context(S.getContext());

  isl::multi_pw_aff GridSizePwAffs =
      isl::manage(isl_multi_pw_aff_copy(Kernel->grid_size));
  for (long i = 0; i < Kernel->n_grid; i++) {
    isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i);
    isl::ast_expr GridSize = Context.expr_from(Size);
    Value *Res = ExprBuilder.create(GridSize.release());
    Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_grid; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1]);
}

std::tuple<Value *, Value *, Value *>
GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;

  for (long i = 0; i < Kernel->n_block; i++) {
    Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_block; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
}

void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
                                          Instruction *Param, int Index) {
  Value *Slot = Builder.CreateGEP(
      Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
  Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
  Builder.CreateStore(ParamTyped, Slot);
}

Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  // If we are using the OpenCL Runtime, we need to add the kernel argument
  // sizes to the end of the launch-parameter list, so OpenCL can determine
  // how big the respective kernel arguments are.
  // Here we need to reserve adequate space for that.
  Type *ArrayTy;
  if (Runtime == GPURuntime::OpenCL)
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);
  else
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs);
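  // Illustrative layout of the resulting parameter array for the OpenCL
  // runtime with NumArgs == 2 (for CUDA, only the first NumArgs slots
  // exist; the size slots are filled at the end of this function):
  //
  //   Parameters[0] = &arg0     Parameters[2] = &size_of_arg0
  //   Parameters[1] = &arg1     Parameters[3] = &size_of_arg1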
respective kernel arguments are. 1628 // Here we need to reserve adequate space for that. 1629 Type *ArrayTy; 1630 if (Runtime == GPURuntime::OpenCL) 1631 ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs); 1632 else 1633 ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs); 1634 1635 BasicBlock *EntryBlock = 1636 &Builder.GetInsertBlock()->getParent()->getEntryBlock(); 1637 auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace(); 1638 std::string Launch = "polly_launch_" + std::to_string(Kernel->id); 1639 Instruction *Parameters = new AllocaInst( 1640 ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator()); 1641 1642 int Index = 0; 1643 for (long i = 0; i < Prog->n_array; i++) { 1644 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 1645 continue; 1646 1647 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 1648 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id)); 1649 1650 if (Runtime == GPURuntime::OpenCL) 1651 ArgSizes[Index] = SAI->getElemSizeInBytes(); 1652 1653 Value *DevArray = nullptr; 1654 if (PollyManagedMemory) { 1655 DevArray = getManagedDeviceArray(&Prog->array[i], 1656 const_cast<ScopArrayInfo *>(SAI)); 1657 } else { 1658 DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)]; 1659 DevArray = createCallGetDevicePtr(DevArray); 1660 } 1661 assert(DevArray != nullptr && "Array to be offloaded to device not " 1662 "initialized"); 1663 Value *Offset = getArrayOffset(&Prog->array[i]); 1664 1665 if (Offset) { 1666 DevArray = Builder.CreatePointerCast( 1667 DevArray, SAI->getElementType()->getPointerTo()); 1668 DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset)); 1669 DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy()); 1670 } 1671 Value *Slot = Builder.CreateGEP( 1672 Parameters, {Builder.getInt64(0), Builder.getInt64(Index)}); 1673 1674 if (gpu_array_is_read_only_scalar(&Prog->array[i])) { 1675 Value *ValPtr = nullptr; 1676 if (PollyManagedMemory) 1677 ValPtr = DevArray; 1678 else 1679 ValPtr = BlockGen.getOrCreateAlloca(SAI); 1680 1681 assert(ValPtr != nullptr && "ValPtr that should point to a valid object" 1682 " to be stored into Parameters"); 1683 Value *ValPtrCast = 1684 Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy()); 1685 Builder.CreateStore(ValPtrCast, Slot); 1686 } else { 1687 Instruction *Param = 1688 new AllocaInst(Builder.getInt8PtrTy(), AddressSpace, 1689 Launch + "_param_" + std::to_string(Index), 1690 EntryBlock->getTerminator()); 1691 Builder.CreateStore(DevArray, Param); 1692 Value *ParamTyped = 1693 Builder.CreatePointerCast(Param, Builder.getInt8PtrTy()); 1694 Builder.CreateStore(ParamTyped, Slot); 1695 } 1696 Index++; 1697 } 1698 1699 int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set); 1700 1701 for (long i = 0; i < NumHostIters; i++) { 1702 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i); 1703 Value *Val = IDToValue[Id]; 1704 isl_id_free(Id); 1705 1706 if (Runtime == GPURuntime::OpenCL) 1707 ArgSizes[Index] = computeSizeInBytes(Val->getType()); 1708 1709 Instruction *Param = 1710 new AllocaInst(Val->getType(), AddressSpace, 1711 Launch + "_param_" + std::to_string(Index), 1712 EntryBlock->getTerminator()); 1713 Builder.CreateStore(Val, Param); 1714 insertStoreParameter(Parameters, Param, Index); 1715 Index++; 1716 } 1717 1718 int NumVars = isl_space_dim(Kernel->space, isl_dim_param); 1719 1720 for (long i = 0; i < NumVars; i++) { 1721 isl_id *Id = isl_space_get_dim_id(Kernel->space, 
isl_dim_param, i); 1722 Value *Val = IDToValue[Id]; 1723 if (ValueMap.count(Val)) 1724 Val = ValueMap[Val]; 1725 isl_id_free(Id); 1726 1727 if (Runtime == GPURuntime::OpenCL) 1728 ArgSizes[Index] = computeSizeInBytes(Val->getType()); 1729 1730 Instruction *Param = 1731 new AllocaInst(Val->getType(), AddressSpace, 1732 Launch + "_param_" + std::to_string(Index), 1733 EntryBlock->getTerminator()); 1734 Builder.CreateStore(Val, Param); 1735 insertStoreParameter(Parameters, Param, Index); 1736 Index++; 1737 } 1738 1739 for (auto Val : SubtreeValues) { 1740 if (Runtime == GPURuntime::OpenCL) 1741 ArgSizes[Index] = computeSizeInBytes(Val->getType()); 1742 1743 Instruction *Param = 1744 new AllocaInst(Val->getType(), AddressSpace, 1745 Launch + "_param_" + std::to_string(Index), 1746 EntryBlock->getTerminator()); 1747 Builder.CreateStore(Val, Param); 1748 insertStoreParameter(Parameters, Param, Index); 1749 Index++; 1750 } 1751 1752 if (Runtime == GPURuntime::OpenCL) { 1753 for (int i = 0; i < NumArgs; i++) { 1754 Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]); 1755 Instruction *Param = 1756 new AllocaInst(Builder.getInt32Ty(), AddressSpace, 1757 Launch + "_param_size_" + std::to_string(i), 1758 EntryBlock->getTerminator()); 1759 Builder.CreateStore(Val, Param); 1760 insertStoreParameter(Parameters, Param, Index); 1761 Index++; 1762 } 1763 } 1764 1765 auto Location = EntryBlock->getTerminator(); 1766 return new BitCastInst(Parameters, Builder.getInt8PtrTy(), 1767 Launch + "_params_i8ptr", Location); 1768 } 1769 1770 void GPUNodeBuilder::setupKernelSubtreeFunctions( 1771 SetVector<Function *> SubtreeFunctions) { 1772 for (auto Fn : SubtreeFunctions) { 1773 const std::string ClonedFnName = Fn->getName(); 1774 Function *Clone = GPUModule->getFunction(ClonedFnName); 1775 if (!Clone) 1776 Clone = 1777 Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage, 1778 ClonedFnName, GPUModule.get()); 1779 assert(Clone && "Expected cloned function to be initialized."); 1780 assert(ValueMap.find(Fn) == ValueMap.end() && 1781 "Fn already present in ValueMap"); 1782 ValueMap[Fn] = Clone; 1783 } 1784 } 1785 void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) { 1786 isl_id *Id = isl_ast_node_get_annotation(KernelStmt); 1787 ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id); 1788 isl_id_free(Id); 1789 isl_ast_node_free(KernelStmt); 1790 1791 if (Kernel->n_grid > 1) 1792 DeepestParallel = 1793 std::max(DeepestParallel, isl_space_dim(Kernel->space, isl_dim_set)); 1794 else 1795 DeepestSequential = 1796 std::max(DeepestSequential, isl_space_dim(Kernel->space, isl_dim_set)); 1797 1798 Value *BlockDimX, *BlockDimY, *BlockDimZ; 1799 std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel); 1800 1801 SetVector<Value *> SubtreeValues; 1802 SetVector<Function *> SubtreeFunctions; 1803 SetVector<const Loop *> Loops; 1804 isl::space ParamSpace; 1805 std::tie(SubtreeValues, SubtreeFunctions, Loops, ParamSpace) = 1806 getReferencesInKernel(Kernel); 1807 1808 // Add parameters that appear only in the access function to the kernel 1809 // space. This is important to make sure that all isl_ids are passed as 1810 // parameters to the kernel, even though we may not have all parameters 1811 // in the context to improve compile time. 
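// A rough sketch of the alignment performed below (isl semantics,
// illustrative parameter names only):
//   Kernel->space parameters: [n]    ParamSpace parameters: [n, tmp]
//   after isl_space_align_params:    [n, tmp]
// i.e. parameters that occur only in access functions are appended to the
// kernel's parameter list so that they, too, are passed into the kernel.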
1812 Kernel->space = isl_space_align_params(Kernel->space, ParamSpace.release()); 1813 1814 assert(Kernel->tree && "Device AST of kernel node is empty"); 1815 1816 Instruction &HostInsertPoint = *Builder.GetInsertPoint(); 1817 IslExprBuilder::IDToValueTy HostIDs = IDToValue; 1818 ValueMapT HostValueMap = ValueMap; 1819 BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap; 1820 ScalarMap.clear(); 1821 BlockGenerator::EscapeUsersAllocaMapTy HostEscapeMap = EscapeMap; 1822 EscapeMap.clear(); 1823 1824 // For all loops we depend on, create values that contain the current loop 1825 // iteration. These values are necessary to generate code for SCEVs that 1826 // depend on such loops. As a result we need to pass them to the subfunction. 1827 for (const Loop *L : Loops) { 1828 const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)), 1829 SE.getUnknown(Builder.getInt64(1)), 1830 L, SCEV::FlagAnyWrap); 1831 Value *V = generateSCEV(OuterLIV); 1832 OutsideLoopIterations[L] = SE.getUnknown(V); 1833 SubtreeValues.insert(V); 1834 } 1835 1836 createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions); 1837 setupKernelSubtreeFunctions(SubtreeFunctions); 1838 1839 create(isl_ast_node_copy(Kernel->tree)); 1840 1841 finalizeKernelArguments(Kernel); 1842 Function *F = Builder.GetInsertBlock()->getParent(); 1843 if (Arch == GPUArch::NVPTX64) 1844 addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ); 1845 clearDominators(F); 1846 clearScalarEvolution(F); 1847 clearLoops(F); 1848 1849 IDToValue = HostIDs; 1850 1851 ValueMap = std::move(HostValueMap); 1852 ScalarMap = std::move(HostScalarMap); 1853 EscapeMap = std::move(HostEscapeMap); 1854 IDToSAI.clear(); 1855 Annotator.resetAlternativeAliasBases(); 1856 for (auto &BasePtr : LocalArrays) 1857 S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array); 1858 LocalArrays.clear(); 1859 1860 std::string ASMString = finalizeKernelFunction(); 1861 Builder.SetInsertPoint(&HostInsertPoint); 1862 Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues); 1863 1864 std::string Name = getKernelFuncName(Kernel->id); 1865 Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name); 1866 Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name"); 1867 Value *GPUKernel = createCallGetKernel(KernelString, NameString); 1868 1869 Value *GridDimX, *GridDimY; 1870 std::tie(GridDimX, GridDimY) = getGridSizes(Kernel); 1871 1872 createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY, 1873 BlockDimZ, Parameters); 1874 createCallFreeKernel(GPUKernel); 1875 1876 for (auto Id : KernelIds) 1877 isl_id_free(Id); 1878 1879 KernelIds.clear(); 1880 } 1881 1882 /// Compute the DataLayout string for the NVPTX backend. 1883 /// 1884 /// @param is64Bit Are we looking for a 64 bit architecture? 1885 static std::string computeNVPTXDataLayout(bool is64Bit) { 1886 std::string Ret = ""; 1887 1888 if (!is64Bit) { 1889 Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1890 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:" 1891 "64-v128:128:128-n16:32:64"; 1892 } else { 1893 Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1894 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:" 1895 "64-v128:128:128-n16:32:64"; 1896 } 1897 1898 return Ret; 1899 } 1900 1901 /// Compute the DataLayout string for a SPIR kernel. 1902 /// 1903 /// @param is64Bit Are we looking for a 64 bit architecture?
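///
/// As a reading aid for the layout strings in this file (standard LLVM data
/// layout syntax): "e" means little endian, "p:64:64:64" gives the pointer
/// size, ABI alignment and preferred alignment in bits, "iN:a:p", "fN:a:p"
/// and "vN:a:p" give the corresponding integer, float and vector alignments,
/// and "n16:32:64" lists the natively supported integer widths.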
1904 static std::string computeSPIRDataLayout(bool is64Bit) { 1905 std::string Ret = ""; 1906 1907 if (!is64Bit) { 1908 Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1909 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:" 1910 "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:" 1911 "256:256-v256:256:256-v512:512:512-v1024:1024:1024"; 1912 } else { 1913 Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:" 1914 "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:" 1915 "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:" 1916 "256:256-v256:256:256-v512:512:512-v1024:1024:1024"; 1917 } 1918 1919 return Ret; 1920 } 1921 1922 Function * 1923 GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel, 1924 SetVector<Value *> &SubtreeValues) { 1925 std::vector<Type *> Args; 1926 std::string Identifier = getKernelFuncName(Kernel->id); 1927 1928 std::vector<Metadata *> MemoryType; 1929 1930 for (long i = 0; i < Prog->n_array; i++) { 1931 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 1932 continue; 1933 1934 if (gpu_array_is_read_only_scalar(&Prog->array[i])) { 1935 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 1936 const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id)); 1937 Args.push_back(SAI->getElementType()); 1938 MemoryType.push_back( 1939 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1940 } else { 1941 static const int UseGlobalMemory = 1; 1942 Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory)); 1943 MemoryType.push_back( 1944 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1))); 1945 } 1946 } 1947 1948 int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set); 1949 1950 for (long i = 0; i < NumHostIters; i++) { 1951 Args.push_back(Builder.getInt64Ty()); 1952 MemoryType.push_back( 1953 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1954 } 1955 1956 int NumVars = isl_space_dim(Kernel->space, isl_dim_param); 1957 1958 for (long i = 0; i < NumVars; i++) { 1959 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i); 1960 Value *Val = IDToValue[Id]; 1961 isl_id_free(Id); 1962 Args.push_back(Val->getType()); 1963 MemoryType.push_back( 1964 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1965 } 1966 1967 for (auto *V : SubtreeValues) { 1968 Args.push_back(V->getType()); 1969 MemoryType.push_back( 1970 ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0))); 1971 } 1972 1973 auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false); 1974 auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier, 1975 GPUModule.get()); 1976 1977 std::vector<Metadata *> EmptyStrings; 1978 1979 for (unsigned int i = 0; i < MemoryType.size(); i++) { 1980 EmptyStrings.push_back(MDString::get(FN->getContext(), "")); 1981 } 1982 1983 if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) { 1984 FN->setMetadata("kernel_arg_addr_space", 1985 MDNode::get(FN->getContext(), MemoryType)); 1986 FN->setMetadata("kernel_arg_name", 1987 MDNode::get(FN->getContext(), EmptyStrings)); 1988 FN->setMetadata("kernel_arg_access_qual", 1989 MDNode::get(FN->getContext(), EmptyStrings)); 1990 FN->setMetadata("kernel_arg_type", 1991 MDNode::get(FN->getContext(), EmptyStrings)); 1992 FN->setMetadata("kernel_arg_type_qual", 1993 MDNode::get(FN->getContext(), EmptyStrings)); 1994 FN->setMetadata("kernel_arg_base_type", 1995 MDNode::get(FN->getContext(), EmptyStrings)); 1996 } 1997 1998 switch (Arch) { 1999 
case GPUArch::NVPTX64: 2000 FN->setCallingConv(CallingConv::PTX_Kernel); 2001 break; 2002 case GPUArch::SPIR32: 2003 case GPUArch::SPIR64: 2004 FN->setCallingConv(CallingConv::SPIR_KERNEL); 2005 break; 2006 } 2007 2008 auto Arg = FN->arg_begin(); 2009 for (long i = 0; i < Kernel->n_array; i++) { 2010 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 2011 continue; 2012 2013 Arg->setName(Kernel->array[i].array->name); 2014 2015 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 2016 const ScopArrayInfo *SAI = 2017 ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id))); 2018 Type *EleTy = SAI->getElementType(); 2019 Value *Val = &*Arg; 2020 SmallVector<const SCEV *, 4> Sizes; 2021 isl_ast_build *Build = 2022 isl_ast_build_from_context(isl_set_copy(Prog->context)); 2023 Sizes.push_back(nullptr); 2024 for (long j = 1, n = Kernel->array[i].array->n_index; j < n; j++) { 2025 isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff( 2026 Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j)); 2027 auto V = ExprBuilder.create(DimSize); 2028 Sizes.push_back(SE.getSCEV(V)); 2029 } 2030 const ScopArrayInfo *SAIRep = 2031 S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array); 2032 LocalArrays.push_back(Val); 2033 2034 isl_ast_build_free(Build); 2035 KernelIds.push_back(Id); 2036 IDToSAI[Id] = SAIRep; 2037 Arg++; 2038 } 2039 2040 for (long i = 0; i < NumHostIters; i++) { 2041 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i); 2042 Arg->setName(isl_id_get_name(Id)); 2043 IDToValue[Id] = &*Arg; 2044 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2045 Arg++; 2046 } 2047 2048 for (long i = 0; i < NumVars; i++) { 2049 isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i); 2050 Arg->setName(isl_id_get_name(Id)); 2051 Value *Val = IDToValue[Id]; 2052 ValueMap[Val] = &*Arg; 2053 IDToValue[Id] = &*Arg; 2054 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2055 Arg++; 2056 } 2057 2058 for (auto *V : SubtreeValues) { 2059 Arg->setName(V->getName()); 2060 ValueMap[V] = &*Arg; 2061 Arg++; 2062 } 2063 2064 return FN; 2065 } 2066 2067 void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) { 2068 Intrinsic::ID IntrinsicsBID[2]; 2069 Intrinsic::ID IntrinsicsTID[3]; 2070 2071 switch (Arch) { 2072 case GPUArch::SPIR64: 2073 case GPUArch::SPIR32: 2074 llvm_unreachable("Cannot generate NVVM intrinsics for SPIR"); 2075 case GPUArch::NVPTX64: 2076 IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x; 2077 IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y; 2078 2079 IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x; 2080 IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y; 2081 IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z; 2082 break; 2083 } 2084 2085 auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable { 2086 std::string Name = isl_id_get_name(Id); 2087 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 2088 Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr); 2089 Value *Val = Builder.CreateCall(IntrinsicFn, {}); 2090 Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name); 2091 IDToValue[Id] = Val; 2092 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2093 }; 2094 2095 for (int i = 0; i < Kernel->n_grid; ++i) { 2096 isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i); 2097 addId(Id, IntrinsicsBID[i]); 2098 } 2099 2100 for (int i = 0; i < Kernel->n_block; ++i) { 2101 isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i); 
2102 addId(Id, IntrinsicsTID[i]); 2103 } 2104 } 2105 2106 void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel) { 2107 const char *GroupName[3] = {"__gen_ocl_get_group_id0", 2108 "__gen_ocl_get_group_id1", 2109 "__gen_ocl_get_group_id2"}; 2110 2111 const char *LocalName[3] = {"__gen_ocl_get_local_id0", 2112 "__gen_ocl_get_local_id1", 2113 "__gen_ocl_get_local_id2"}; 2114 2115 auto createFunc = [this](const char *Name, __isl_take isl_id *Id) mutable { 2116 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 2117 Function *FN = M->getFunction(Name); 2118 2119 // If FN is not available, declare it. 2120 if (!FN) { 2121 GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage; 2122 std::vector<Type *> Args; 2123 FunctionType *Ty = FunctionType::get(Builder.getInt32Ty(), Args, false); 2124 FN = Function::Create(Ty, Linkage, Name, M); 2125 FN->setCallingConv(CallingConv::SPIR_FUNC); 2126 } 2127 2128 Value *Val = Builder.CreateCall(FN, {}); 2129 Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name); 2130 IDToValue[Id] = Val; 2131 KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id)); 2132 }; 2133 2134 for (int i = 0; i < Kernel->n_grid; ++i) 2135 createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i)); 2136 2137 for (int i = 0; i < Kernel->n_block; ++i) 2138 createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i)); 2139 } 2140 2141 void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) { 2142 auto Arg = FN->arg_begin(); 2143 for (long i = 0; i < Kernel->n_array; i++) { 2144 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 2145 continue; 2146 2147 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 2148 const ScopArrayInfo *SAI = 2149 ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id))); 2150 isl_id_free(Id); 2151 2152 if (SAI->getNumberOfDimensions() > 0) { 2153 Arg++; 2154 continue; 2155 } 2156 2157 Value *Val = &*Arg; 2158 2159 if (!gpu_array_is_read_only_scalar(&Prog->array[i])) { 2160 Type *TypePtr = SAI->getElementType()->getPointerTo(); 2161 Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr); 2162 Val = Builder.CreateLoad(TypedArgPtr); 2163 } 2164 2165 Value *Alloca = BlockGen.getOrCreateAlloca(SAI); 2166 Builder.CreateStore(Val, Alloca); 2167 2168 Arg++; 2169 } 2170 } 2171 2172 void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) { 2173 auto *FN = Builder.GetInsertBlock()->getParent(); 2174 auto Arg = FN->arg_begin(); 2175 2176 bool StoredScalar = false; 2177 for (long i = 0; i < Kernel->n_array; i++) { 2178 if (!ppcg_kernel_requires_array_argument(Kernel, i)) 2179 continue; 2180 2181 isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set); 2182 const ScopArrayInfo *SAI = 2183 ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id))); 2184 isl_id_free(Id); 2185 2186 if (SAI->getNumberOfDimensions() > 0) { 2187 Arg++; 2188 continue; 2189 } 2190 2191 if (gpu_array_is_read_only_scalar(&Prog->array[i])) { 2192 Arg++; 2193 continue; 2194 } 2195 2196 Value *Alloca = BlockGen.getOrCreateAlloca(SAI); 2197 Value *ArgPtr = &*Arg; 2198 Type *TypePtr = SAI->getElementType()->getPointerTo(); 2199 Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr); 2200 Value *Val = Builder.CreateLoad(Alloca); 2201 Builder.CreateStore(Val, TypedArgPtr); 2202 StoredScalar = true; 2203 2204 Arg++; 2205 } 2206 2207 if (StoredScalar) { 2208 /// In case more than one thread contains scalar stores, the generated 2209 /// code might be incorrect if we only store
at the end of the kernel. 2210 /// To support this case we need to store these scalars back at each 2211 /// memory store or at least before each kernel barrier. 2212 if (Kernel->n_block != 0 || Kernel->n_grid != 0) { 2213 BuildSuccessful = 0; 2214 DEBUG( 2215 dbgs() << getUniqueScopName(&S) 2216 << " has a store to a scalar value that" 2217 " would be undefined to run in parallel. Bailing out.\n";); 2218 } 2219 } 2220 } 2221 2222 void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) { 2223 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 2224 2225 for (int i = 0; i < Kernel->n_var; ++i) { 2226 struct ppcg_kernel_var &Var = Kernel->var[i]; 2227 isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set); 2228 Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType(); 2229 2230 Type *ArrayTy = EleTy; 2231 SmallVector<const SCEV *, 4> Sizes; 2232 2233 Sizes.push_back(nullptr); 2234 for (unsigned int j = 1; j < Var.array->n_index; ++j) { 2235 isl_val *Val = isl_vec_get_element_val(Var.size, j); 2236 long Bound = isl_val_get_num_si(Val); 2237 isl_val_free(Val); 2238 Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound)); 2239 } 2240 2241 for (int j = Var.array->n_index - 1; j >= 0; --j) { 2242 isl_val *Val = isl_vec_get_element_val(Var.size, j); 2243 long Bound = isl_val_get_num_si(Val); 2244 isl_val_free(Val); 2245 ArrayTy = ArrayType::get(ArrayTy, Bound); 2246 } 2247 2248 const ScopArrayInfo *SAI; 2249 Value *Allocation; 2250 if (Var.type == ppcg_access_shared) { 2251 auto GlobalVar = new GlobalVariable( 2252 *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name, 2253 nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3); 2254 GlobalVar->setAlignment(EleTy->getPrimitiveSizeInBits() / 8); 2255 GlobalVar->setInitializer(Constant::getNullValue(ArrayTy)); 2256 2257 Allocation = GlobalVar; 2258 } else if (Var.type == ppcg_access_private) { 2259 Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array"); 2260 } else { 2261 llvm_unreachable("unknown variable type"); 2262 } 2263 SAI = 2264 S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array); 2265 Id = isl_id_alloc(S.getIslCtx(), Var.name, nullptr); 2266 IDToValue[Id] = Allocation; 2267 LocalArrays.push_back(Allocation); 2268 KernelIds.push_back(Id); 2269 IDToSAI[Id] = SAI; 2270 } 2271 } 2272 2273 void GPUNodeBuilder::createKernelFunction( 2274 ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues, 2275 SetVector<Function *> &SubtreeFunctions) { 2276 std::string Identifier = getKernelFuncName(Kernel->id); 2277 GPUModule.reset(new Module(Identifier, Builder.getContext())); 2278 2279 switch (Arch) { 2280 case GPUArch::NVPTX64: 2281 if (Runtime == GPURuntime::CUDA) 2282 GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda")); 2283 else if (Runtime == GPURuntime::OpenCL) 2284 GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl")); 2285 GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */)); 2286 break; 2287 case GPUArch::SPIR32: 2288 GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown")); 2289 GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */)); 2290 break; 2291 case GPUArch::SPIR64: 2292 GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown")); 2293 GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */)); 2294 break; 2295 } 2296 2297 Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues); 2298 2299 BasicBlock *PrevBlock = 
Builder.GetInsertBlock(); 2300 auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN); 2301 2302 DT.addNewBlock(EntryBlock, PrevBlock); 2303 2304 Builder.SetInsertPoint(EntryBlock); 2305 Builder.CreateRetVoid(); 2306 Builder.SetInsertPoint(EntryBlock, EntryBlock->begin()); 2307 2308 ScopDetection::markFunctionAsInvalid(FN); 2309 2310 prepareKernelArguments(Kernel, FN); 2311 createKernelVariables(Kernel, FN); 2312 2313 switch (Arch) { 2314 case GPUArch::NVPTX64: 2315 insertKernelIntrinsics(Kernel); 2316 break; 2317 case GPUArch::SPIR32: 2318 case GPUArch::SPIR64: 2319 insertKernelCallsSPIR(Kernel); 2320 break; 2321 } 2322 } 2323 2324 std::string GPUNodeBuilder::createKernelASM() { 2325 llvm::Triple GPUTriple; 2326 2327 switch (Arch) { 2328 case GPUArch::NVPTX64: 2329 switch (Runtime) { 2330 case GPURuntime::CUDA: 2331 GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda")); 2332 break; 2333 case GPURuntime::OpenCL: 2334 GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl")); 2335 break; 2336 } 2337 break; 2338 case GPUArch::SPIR64: 2339 case GPUArch::SPIR32: 2340 std::string SPIRAssembly; 2341 raw_string_ostream IROstream(SPIRAssembly); 2342 IROstream << *GPUModule; 2343 IROstream.flush(); 2344 return SPIRAssembly; 2345 } 2346 2347 std::string ErrMsg; 2348 auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg); 2349 2350 if (!GPUTarget) { 2351 errs() << ErrMsg << "\n"; 2352 return ""; 2353 } 2354 2355 TargetOptions Options; 2356 Options.UnsafeFPMath = FastMath; 2357 2358 std::string subtarget; 2359 2360 switch (Arch) { 2361 case GPUArch::NVPTX64: 2362 subtarget = CudaVersion; 2363 break; 2364 case GPUArch::SPIR32: 2365 case GPUArch::SPIR64: 2366 llvm_unreachable("No subtarget for SPIR architecture"); 2367 } 2368 2369 std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine( 2370 GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>())); 2371 2372 SmallString<0> ASMString; 2373 raw_svector_ostream ASMStream(ASMString); 2374 llvm::legacy::PassManager PM; 2375 2376 PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis())); 2377 2378 if (TargetM->addPassesToEmitFile( 2379 PM, ASMStream, TargetMachine::CGFT_AssemblyFile, true /* verify */)) { 2380 errs() << "The target does not support generation of this file type!\n"; 2381 return ""; 2382 } 2383 2384 PM.run(*GPUModule); 2385 2386 return ASMStream.str(); 2387 } 2388 2389 bool GPUNodeBuilder::requiresCUDALibDevice() { 2390 bool RequiresLibDevice = false; 2391 for (Function &F : GPUModule->functions()) { 2392 if (!F.isDeclaration()) 2393 continue; 2394 2395 const std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(&F); 2396 if (CUDALibDeviceFunc.length() != 0) { 2397 // We need to handle the case where a module looks like this: 2398 // @expf(..) 2399 // @llvm.exp.f64(..) 2400 // Both of these functions would be renamed to `__nv_expf`. 2401 // 2402 // So, we must first check for the existence of the libdevice function. 2403 // If this exists, we replace our current function with it. 2404 // 2405 // If it does not exist, we rename the current function to the 2406 // libdevice function name.
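// Sketch (illustrative): before linking libdevice, the module may contain
//   declare float @expf(float)
// which, after the renaming below, becomes
//   declare float @__nv_expf(float)
// and is then resolved when libdevice is linked in by addCUDALibDevice().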
2407 if (Function *Replacement = F.getParent()->getFunction(CUDALibDeviceFunc)) 2408 F.replaceAllUsesWith(Replacement); 2409 else 2410 F.setName(CUDALibDeviceFunc); 2411 RequiresLibDevice = true; 2412 } 2413 } 2414 2415 return RequiresLibDevice; 2416 } 2417 2418 void GPUNodeBuilder::addCUDALibDevice() { 2419 if (Arch != GPUArch::NVPTX64) 2420 return; 2421 2422 if (requiresCUDALibDevice()) { 2423 SMDiagnostic Error; 2424 2425 errs() << CUDALibDevice << "\n"; 2426 auto LibDeviceModule = 2427 parseIRFile(CUDALibDevice, Error, GPUModule->getContext()); 2428 2429 if (!LibDeviceModule) { 2430 BuildSuccessful = false; 2431 report_fatal_error("Could not find or load libdevice. Skipping GPU " 2432 "kernel generation. Please set -polly-acc-libdevice " 2433 "accordingly.\n"); 2434 return; 2435 } 2436 2437 Linker L(*GPUModule); 2438 2439 // Set an nvptx64 target triple to avoid linker warnings. The original 2440 // triple of the libdevice files is nvptx-unknown-unknown. 2441 LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda")); 2442 L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded); 2443 } 2444 } 2445 2446 std::string GPUNodeBuilder::finalizeKernelFunction() { 2447 2448 if (verifyModule(*GPUModule)) { 2449 DEBUG(dbgs() << "verifyModule failed on module:\n"; 2450 GPUModule->print(dbgs(), nullptr); dbgs() << "\n";); 2451 DEBUG(dbgs() << "verifyModule Error:\n"; 2452 verifyModule(*GPUModule, &dbgs());); 2453 2454 if (FailOnVerifyModuleFailure) 2455 llvm_unreachable("VerifyModule failed."); 2456 2457 BuildSuccessful = false; 2458 return ""; 2459 } 2460 2461 addCUDALibDevice(); 2462 2463 if (DumpKernelIR) 2464 outs() << *GPUModule << "\n"; 2465 2466 if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) { 2467 // Optimize module. 2468 llvm::legacy::PassManager OptPasses; 2469 PassManagerBuilder PassBuilder; 2470 PassBuilder.OptLevel = 3; 2471 PassBuilder.SizeLevel = 0; 2472 PassBuilder.populateModulePassManager(OptPasses); 2473 OptPasses.run(*GPUModule); 2474 } 2475 2476 std::string Assembly = createKernelASM(); 2477 2478 if (DumpKernelASM) 2479 outs() << Assembly << "\n"; 2480 2481 GPUModule.release(); 2482 KernelIDs.clear(); 2483 2484 return Assembly; 2485 } 2486 /// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`. 2487 /// @param PwAffs The list of piecewise affine functions to create an 2488 /// `isl_pw_aff_list` from. We expect an rvalue ref because 2489 /// all the isl_pw_aff are used up by this function. 2490 /// 2491 /// @returns The `isl_pw_aff_list`. 2492 __isl_give isl_pw_aff_list * 2493 createPwAffList(isl_ctx *Context, 2494 const std::vector<__isl_take isl_pw_aff *> &&PwAffs) { 2495 isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size()); 2496 2497 for (unsigned i = 0; i < PwAffs.size(); i++) { 2498 List = isl_pw_aff_list_insert(List, i, PwAffs[i]); 2499 } 2500 return List; 2501 } 2502 2503 /// Align all the `PwAffs` such that they have the same parameter dimensions. 2504 /// 2505 /// We loop over all `pw_aff` and align all of their spaces together to 2506 /// create a common space for all the `pw_aff`. This common space is the 2507 /// `AlignSpace`. We then align all the `pw_aff` to this space. We start 2508 /// with the given `SeedSpace`. 2509 /// @param PwAffs The list of piecewise affine functions we want to align. 2510 /// This is an rvalue reference because the entire vector is 2511 /// used up by the end of the operation. 2512 /// @param SeedSpace The space to start the alignment process with.
2513 /// @returns A std::pair, whose first element is the aligned space, 2514 /// whose second element is the vector of aligned piecewise 2515 /// affines. 2516 static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>> 2517 alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs, 2518 __isl_take isl_space *SeedSpace) { 2519 assert(SeedSpace && "Invalid seed space given."); 2520 2521 isl_space *AlignSpace = SeedSpace; 2522 for (isl_pw_aff *PwAff : PwAffs) { 2523 isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff); 2524 AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace); 2525 } 2526 std::vector<isl_pw_aff *> AdjustedPwAffs; 2527 2528 for (unsigned i = 0; i < PwAffs.size(); i++) { 2529 isl_pw_aff *Adjusted = PwAffs[i]; 2530 assert(Adjusted && "Invalid pw_aff given."); 2531 Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace)); 2532 AdjustedPwAffs.push_back(Adjusted); 2533 } 2534 return std::make_pair(AlignSpace, AdjustedPwAffs); 2535 } 2536 2537 namespace { 2538 class PPCGCodeGeneration : public ScopPass { 2539 public: 2540 static char ID; 2541 2542 GPURuntime Runtime = GPURuntime::CUDA; 2543 2544 GPUArch Architecture = GPUArch::NVPTX64; 2545 2546 /// The scop that is currently processed. 2547 Scop *S; 2548 2549 LoopInfo *LI; 2550 DominatorTree *DT; 2551 ScalarEvolution *SE; 2552 const DataLayout *DL; 2553 RegionInfo *RI; 2554 2555 PPCGCodeGeneration() : ScopPass(ID) {} 2556 2557 /// Construct compilation options for PPCG. 2558 /// 2559 /// @returns The compilation options. 2560 ppcg_options *createPPCGOptions() { 2561 auto DebugOptions = 2562 (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options)); 2563 auto Options = (ppcg_options *)malloc(sizeof(ppcg_options)); 2564 2565 DebugOptions->dump_schedule_constraints = false; 2566 DebugOptions->dump_schedule = false; 2567 DebugOptions->dump_final_schedule = false; 2568 DebugOptions->dump_sizes = false; 2569 DebugOptions->verbose = false; 2570 2571 Options->debug = DebugOptions; 2572 2573 Options->group_chains = false; 2574 Options->reschedule = true; 2575 Options->scale_tile_loops = false; 2576 Options->wrap = false; 2577 2578 Options->non_negative_parameters = false; 2579 Options->ctx = nullptr; 2580 Options->sizes = nullptr; 2581 2582 Options->tile = true; 2583 Options->tile_size = 32; 2584 2585 Options->isolate_full_tiles = false; 2586 2587 Options->use_private_memory = PrivateMemory; 2588 Options->use_shared_memory = SharedMemory; 2589 Options->max_shared_memory = 48 * 1024; 2590 2591 Options->target = PPCG_TARGET_CUDA; 2592 Options->openmp = false; 2593 Options->linearize_device_arrays = true; 2594 Options->allow_gnu_extensions = false; 2595 2596 Options->unroll_copy_shared = false; 2597 Options->unroll_gpu_tile = false; 2598 Options->live_range_reordering = true; 2599 2600 2601 Options->hybrid = false; 2602 Options->opencl_compiler_options = nullptr; 2603 Options->opencl_use_gpu = false; 2604 Options->opencl_n_include_file = 0; 2605 Options->opencl_include_files = nullptr; 2606 Options->opencl_print_kernel_types = false; 2607 Options->opencl_embed_kernel_code = false; 2608 2609 Options->save_schedule_file = nullptr; 2610 Options->load_schedule_file = nullptr; 2611 2612 return Options; 2613 } 2614 2615 /// Get a tagged access relation containing all accesses of type @p AccessTy.
2616 /// 2617 /// Instead of a normal access of the form: 2618 /// 2619 /// Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)] 2620 /// 2621 /// a tagged access has the form 2622 /// 2623 /// [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)] 2624 /// 2625 /// where 'id' is an additional space that references the memory access that 2626 /// triggered the access. 2627 /// 2628 /// @param AccessTy The type of the memory accesses to collect. 2629 /// 2630 /// @return The relation describing all tagged memory accesses. 2631 isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) { 2632 isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace().release()); 2633 2634 for (auto &Stmt : *S) 2635 for (auto &Acc : Stmt) 2636 if (Acc->getType() == AccessTy) { 2637 isl_map *Relation = Acc->getAccessRelation().release(); 2638 Relation = 2639 isl_map_intersect_domain(Relation, Stmt.getDomain().release()); 2640 2641 isl_space *Space = isl_map_get_space(Relation); 2642 Space = isl_space_range(Space); 2643 Space = isl_space_from_range(Space); 2644 Space = 2645 isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release()); 2646 isl_map *Universe = isl_map_universe(Space); 2647 Relation = isl_map_domain_product(Relation, Universe); 2648 Accesses = isl_union_map_add_map(Accesses, Relation); 2649 } 2650 2651 return Accesses; 2652 } 2653 2654 /// Get the set of all read accesses, tagged with the access id. 2655 /// 2656 /// @see getTaggedAccesses 2657 isl_union_map *getTaggedReads() { 2658 return getTaggedAccesses(MemoryAccess::READ); 2659 } 2660 2661 /// Get the set of all may (and must) accesses, tagged with the access id. 2662 /// 2663 /// @see getTaggedAccesses 2664 isl_union_map *getTaggedMayWrites() { 2665 return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE), 2666 getTaggedAccesses(MemoryAccess::MUST_WRITE)); 2667 } 2668 2669 /// Get the set of all must accesses, tagged with the access id. 2670 /// 2671 /// @see getTaggedAccesses 2672 isl_union_map *getTaggedMustWrites() { 2673 return getTaggedAccesses(MemoryAccess::MUST_WRITE); 2674 } 2675 2676 /// Collect parameter and array names as isl_ids. 2677 /// 2678 /// To reason about the different parameters and arrays used, ppcg requires 2679 /// a list of all isl_ids in use. As PPCG traditionally performs 2680 /// source-to-source compilation each of these isl_ids is mapped to the 2681 /// expression that represents it. As we do not have a corresponding 2682 /// expression in Polly, we just map each id to a 'zero' expression to match 2683 /// the data format that ppcg expects. 2684 /// 2685 /// @returns A map from collected ids to 'zero' ast expressions. 2686 __isl_give isl_id_to_ast_expr *getNames() { 2687 auto *Names = isl_id_to_ast_expr_alloc( 2688 S->getIslCtx(), 2689 S->getNumParams() + std::distance(S->array_begin(), S->array_end())); 2690 auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx())); 2691 2692 for (const SCEV *P : S->parameters()) { 2693 isl_id *Id = S->getIdForParam(P).release(); 2694 Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero)); 2695 } 2696 2697 for (auto &Array : S->arrays()) { 2698 auto Id = Array->getBasePtrId().release(); 2699 Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero)); 2700 } 2701 2702 isl_ast_expr_free(Zero); 2703 2704 return Names; 2705 } 2706 2707 /// Create a new PPCG scop from the current scop. 2708 /// 2709 /// The PPCG scop is initialized with data from the current polly::Scop. From
From 2710 /// this initial data, the data-dependences in the PPCG scop are initialized. 2711 /// We do not use Polly's dependence analysis for now, to ensure we match 2712 /// the PPCG default behaviour more closely. 2713 /// 2714 /// @returns A new ppcg scop. 2715 ppcg_scop *createPPCGScop() { 2716 MustKillsInfo KillsInfo = computeMustKillsInfo(*S); 2717 2718 auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop)); 2719 2720 PPCGScop->options = createPPCGOptions(); 2721 // enable live range reordering 2722 PPCGScop->options->live_range_reordering = 1; 2723 2724 PPCGScop->start = 0; 2725 PPCGScop->end = 0; 2726 2727 PPCGScop->context = S->getContext().release(); 2728 PPCGScop->domain = S->getDomains().release(); 2729 // TODO: investigate this further. PPCG calls collect_call_domains. 2730 PPCGScop->call = isl_union_set_from_set(S->getContext().release()); 2731 PPCGScop->tagged_reads = getTaggedReads(); 2732 PPCGScop->reads = S->getReads().release(); 2733 PPCGScop->live_in = nullptr; 2734 PPCGScop->tagged_may_writes = getTaggedMayWrites(); 2735 PPCGScop->may_writes = S->getWrites().release(); 2736 PPCGScop->tagged_must_writes = getTaggedMustWrites(); 2737 PPCGScop->must_writes = S->getMustWrites().release(); 2738 PPCGScop->live_out = nullptr; 2739 PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.take(); 2740 PPCGScop->must_kills = KillsInfo.MustKills.take(); 2741 2742 PPCGScop->tagger = nullptr; 2743 PPCGScop->independence = 2744 isl_union_map_empty(isl_set_get_space(PPCGScop->context)); 2745 PPCGScop->dep_flow = nullptr; 2746 PPCGScop->tagged_dep_flow = nullptr; 2747 PPCGScop->dep_false = nullptr; 2748 PPCGScop->dep_forced = nullptr; 2749 PPCGScop->dep_order = nullptr; 2750 PPCGScop->tagged_dep_order = nullptr; 2751 2752 PPCGScop->schedule = S->getScheduleTree().release(); 2753 // If we have something non-trivial to kill, add it to the schedule 2754 if (KillsInfo.KillsSchedule.get()) 2755 PPCGScop->schedule = isl_schedule_sequence( 2756 PPCGScop->schedule, KillsInfo.KillsSchedule.take()); 2757 2758 PPCGScop->names = getNames(); 2759 PPCGScop->pet = nullptr; 2760 2761 compute_tagger(PPCGScop); 2762 compute_dependences(PPCGScop); 2763 eliminate_dead_code(PPCGScop); 2764 2765 return PPCGScop; 2766 } 2767 2768 /// Collect the array accesses in a statement. 2769 /// 2770 /// @param Stmt The statement for which to collect the accesses. 2771 /// 2772 /// @returns A list of array accesses. 2773 gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) { 2774 gpu_stmt_access *Accesses = nullptr; 2775 2776 for (MemoryAccess *Acc : Stmt) { 2777 auto Access = isl_alloc_type(S->getIslCtx(), struct gpu_stmt_access); 2778 Access->read = Acc->isRead(); 2779 Access->write = Acc->isWrite(); 2780 Access->access = Acc->getAccessRelation().release(); 2781 isl_space *Space = isl_map_get_space(Access->access); 2782 Space = isl_space_range(Space); 2783 Space = isl_space_from_range(Space); 2784 Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release()); 2785 isl_map *Universe = isl_map_universe(Space); 2786 Access->tagged_access = 2787 isl_map_domain_product(Acc->getAccessRelation().release(), Universe); 2788 Access->exact_write = !Acc->isMayWrite(); 2789 Access->ref_id = Acc->getId().release(); 2790 Access->next = Accesses; 2791 Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions(); 2792 // TODO: Also mark one-element accesses to arrays as fixed-element. 2793 Access->fixed_element = 2794 Acc->isLatestScalarKind() ? 
isl_bool_true : isl_bool_false; 2795 Accesses = Access; 2796 } 2797 2798 return Accesses; 2799 } 2800 2801 /// Collect the list of GPU statements. 2802 /// 2803 /// Each statement has an id, a pointer to the underlying data structure, 2804 /// as well as a list with all memory accesses. 2805 /// 2806 /// TODO: Initialize the list of memory accesses. 2807 /// 2808 /// @returns A linked-list of statements. 2809 gpu_stmt *getStatements() { 2810 gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx(), struct gpu_stmt, 2811 std::distance(S->begin(), S->end())); 2812 2813 int i = 0; 2814 for (auto &Stmt : *S) { 2815 gpu_stmt *GPUStmt = &Stmts[i]; 2816 2817 GPUStmt->id = Stmt.getDomainId().release(); 2818 2819 // We use the pet stmt pointer to keep track of the Polly statements. 2820 GPUStmt->stmt = (pet_stmt *)&Stmt; 2821 GPUStmt->accesses = getStmtAccesses(Stmt); 2822 i++; 2823 } 2824 2825 return Stmts; 2826 } 2827 2828 /// Derive the extent of an array. 2829 /// 2830 /// The extent of an array is the set of elements that are within the 2831 /// accessed array. For the inner dimensions, the extent constraints are 2832 /// 0 and the size of the corresponding array dimension. For the first 2833 /// (outermost) dimension, the extent constraints are the minimal and maximal 2834 /// subscript value for the first dimension. 2835 /// 2836 /// @param Array The array to derive the extent for. 2837 /// 2838 /// @returns An isl_set describing the extent of the array. 2839 isl::set getExtent(ScopArrayInfo *Array) { 2840 unsigned NumDims = Array->getNumberOfDimensions(); 2841 2842 if (Array->getNumberOfDimensions() == 0) 2843 return isl::set::universe(Array->getSpace()); 2844 2845 isl::union_map Accesses = S->getAccesses(Array); 2846 isl::union_set AccessUSet = Accesses.range(); 2847 AccessUSet = AccessUSet.coalesce(); 2848 AccessUSet = AccessUSet.detect_equalities(); 2849 AccessUSet = AccessUSet.coalesce(); 2850 2851 if (AccessUSet.is_empty()) 2852 return isl::set::empty(Array->getSpace()); 2853 2854 isl::set AccessSet = AccessUSet.extract_set(Array->getSpace()); 2855 2856 isl::local_space LS = isl::local_space(Array->getSpace()); 2857 2858 isl::pw_aff Val = isl::aff::var_on_domain(LS, isl::dim::set, 0); 2859 isl::pw_aff OuterMin = AccessSet.dim_min(0); 2860 isl::pw_aff OuterMax = AccessSet.dim_max(0); 2861 OuterMin = OuterMin.add_dims(isl::dim::in, Val.dim(isl::dim::in)); 2862 OuterMax = OuterMax.add_dims(isl::dim::in, Val.dim(isl::dim::in)); 2863 OuterMin = OuterMin.set_tuple_id(isl::dim::in, Array->getBasePtrId()); 2864 OuterMax = OuterMax.set_tuple_id(isl::dim::in, Array->getBasePtrId()); 2865 2866 isl::set Extent = isl::set::universe(Array->getSpace()); 2867 2868 Extent = Extent.intersect(OuterMin.le_set(Val)); 2869 Extent = Extent.intersect(OuterMax.ge_set(Val)); 2870 2871 for (unsigned i = 1; i < NumDims; ++i) 2872 Extent = Extent.lower_bound_si(isl::dim::set, i, 0); 2873 2874 for (unsigned i = 0; i < NumDims; ++i) { 2875 isl::pw_aff PwAff = Array->getDimensionSizePw(i); 2876 2877 // isl_pw_aff can be NULL for zero dimension. Only in the case of a 2878 // Fortran array will we have a legitimate dimension. 
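// E.g. a Fortran array declared as `real :: A(n, 10)` carries an explicit
// size for its outermost dimension, whereas for C-style arrays Polly models
// the outermost dimension as unsized, so its isl_pw_aff below is null.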
2879 if (PwAff.is_null()) { 2880 assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension"); 2881 continue; 2882 } 2883 2884 isl::pw_aff Val = isl::aff::var_on_domain( 2885 isl::local_space(Array->getSpace()), isl::dim::set, i); 2886 PwAff = PwAff.add_dims(isl::dim::in, Val.dim(isl::dim::in)); 2887 PwAff = PwAff.set_tuple_id(isl::dim::in, Val.get_tuple_id(isl::dim::in)); 2888 isl::set Set = PwAff.gt_set(Val); 2889 Extent = Set.intersect(Extent); 2890 } 2891 2892 return Extent; 2893 } 2894 2895 /// Derive the bounds of an array. 2896 /// 2897 /// For the first dimension we derive the bound of the array from the extent 2898 /// of this dimension. For inner dimensions we obtain their size directly from 2899 /// ScopArrayInfo. 2900 /// 2901 /// @param PPCGArray The array to compute bounds for. 2902 /// @param Array The polly array from which to take the information. 2903 void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) { 2904 std::vector<isl_pw_aff *> Bounds; 2905 2906 if (PPCGArray.n_index > 0) { 2907 if (isl_set_is_empty(PPCGArray.extent)) { 2908 isl_set *Dom = isl_set_copy(PPCGArray.extent); 2909 isl_local_space *LS = isl_local_space_from_space( 2910 isl_space_params(isl_set_get_space(Dom))); 2911 isl_set_free(Dom); 2912 isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS)); 2913 Bounds.push_back(Zero); 2914 } else { 2915 isl_set *Dom = isl_set_copy(PPCGArray.extent); 2916 Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1); 2917 isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0); 2918 isl_set_free(Dom); 2919 Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound)); 2920 isl_local_space *LS = 2921 isl_local_space_from_space(isl_set_get_space(Dom)); 2922 isl_aff *One = isl_aff_zero_on_domain(LS); 2923 One = isl_aff_add_constant_si(One, 1); 2924 Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One)); 2925 Bound = isl_pw_aff_gist(Bound, S->getContext().release()); 2926 Bounds.push_back(Bound); 2927 } 2928 } 2929 2930 for (unsigned i = 1; i < PPCGArray.n_index; ++i) { 2931 isl_pw_aff *Bound = Array->getDimensionSizePw(i).release(); 2932 auto LS = isl_pw_aff_get_domain_space(Bound); 2933 auto Aff = isl_multi_aff_zero(LS); 2934 2935 // We need types to work out, which is why we perform this weird dance 2936 // with `Aff` and `Bound`. Consider this example: 2937 2938 // LS: [p] -> { [] } 2939 // Zero: [p] -> { [] } | Implicitly, is [p] -> { ~ -> [] }. 2940 // This `~` is used to denote a "null space" (which is different from 2941 // a *zero dimensional* space), which is something that ISL does not 2942 // show you when pretty printing. 2943 2944 // Bound: [p] -> { [] -> [(10p)] } | Here, the [] is a *zero dimensional* 2945 // space, not a "null space" which does not exist at all. 2946 2947 // When we pullback (precompose) `Bound` with `Zero`, we get: 2948 // Bound . Zero = 2949 // ([p] -> { [] -> [(10p)] }) . ([p] -> {~ -> [] }) = 2950 // [p] -> { ~ -> [(10p)] } = 2951 // [p] -> [(10p)] (as ISL pretty prints it) 2952 // Bound Pullback: [p] -> { [(10p)] } 2953 2954 // We want this kind of an expression for Bound, without a 2955 // zero dimensional input, but with a "null space" input for the types 2956 // to work out later on, as far as I (Siddharth Bhat) understand. 2957 // I was unable to find a reference to this in the ISL manual. 2958 // References: Tobias Grosser. 
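// In short: the pullback below composes `Bound` with the zero-dimensional
// `Aff`, stripping the empty input tuple so that `Bound` becomes a purely
// parametric expression of the shape that
// isl_multi_pw_aff_from_pw_aff_list expects further down.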
2959 2960 Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff); 2961 Bounds.push_back(Bound); 2962 } 2963 2964 /// To construct an `isl_multi_pw_aff`, we need all the individual `pw_aff` 2965 /// to have the same parameter dimensions. So, we need to align them to an 2966 /// appropriate space. 2967 /// Scop::Context is _not_ an appropriate space, because when we have 2968 /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not 2969 /// contain all parameter dimensions. 2970 /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together. 2971 isl_space *SeedAlignSpace = S->getParamSpace().release(); 2972 SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1); 2973 2974 isl_space *AlignSpace = nullptr; 2975 std::vector<isl_pw_aff *> AlignedBounds; 2976 std::tie(AlignSpace, AlignedBounds) = 2977 alignPwAffs(std::move(Bounds), SeedAlignSpace); 2978 2979 assert(AlignSpace && "alignPwAffs did not initialise AlignSpace"); 2980 2981 isl_pw_aff_list *BoundsList = 2982 createPwAffList(S->getIslCtx(), std::move(AlignedBounds)); 2983 2984 isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent); 2985 BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace); 2986 2987 assert(BoundsSpace && "Unable to access space of array."); 2988 assert(BoundsList && "Unable to access list of bounds."); 2989 2990 PPCGArray.bound = 2991 isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList); 2992 assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly."); 2993 } 2994 2995 /// Create the arrays for @p PPCGProg. 2996 /// 2997 /// @param PPCGProg The program to compute the arrays for. 2998 void createArrays(gpu_prog *PPCGProg, 2999 const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) { 3000 int i = 0; 3001 for (auto &Array : ValidSAIs) { 3002 std::string TypeName; 3003 raw_string_ostream OS(TypeName); 3004 3005 OS << *Array->getElementType(); 3006 TypeName = OS.str(); 3007 3008 gpu_array_info &PPCGArray = PPCGProg->array[i]; 3009 3010 PPCGArray.space = Array->getSpace().release(); 3011 PPCGArray.type = strdup(TypeName.c_str()); 3012 PPCGArray.size = DL->getTypeAllocSize(Array->getElementType()); 3013 PPCGArray.name = strdup(Array->getName().c_str()); 3014 PPCGArray.extent = nullptr; 3015 PPCGArray.n_index = Array->getNumberOfDimensions(); 3016 PPCGArray.extent = getExtent(Array).release(); 3017 PPCGArray.n_ref = 0; 3018 PPCGArray.refs = nullptr; 3019 PPCGArray.accessed = true; 3020 PPCGArray.read_only_scalar = 3021 Array->isReadOnly() && Array->getNumberOfDimensions() == 0; 3022 PPCGArray.has_compound_element = false; 3023 PPCGArray.local = false; 3024 PPCGArray.declare_local = false; 3025 PPCGArray.global = false; 3026 PPCGArray.linearize = false; 3027 PPCGArray.dep_order = nullptr; 3028 PPCGArray.user = Array; 3029 3030 PPCGArray.bound = nullptr; 3031 setArrayBounds(PPCGArray, Array); 3032 i++; 3033 3034 collect_references(PPCGProg, &PPCGArray); 3035 PPCGArray.only_fixed_element = only_fixed_element_accessed(&PPCGArray); 3036 } 3037 } 3038 3039 /// Create an identity map between the arrays in the scop. 3040 /// 3041 /// @returns An identity map between the arrays in the scop.
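///
/// E.g. for a scop with arrays A[n][m] and B[k], the result is,
/// schematically: { A[i0, i1] -> A[i0, i1]; B[i0] -> B[i0] }.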
3042 isl_union_map *getArrayIdentity() { 3043 isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release()); 3044 3045 for (auto &Array : S->arrays()) { 3046 isl_space *Space = Array->getSpace().release(); 3047 Space = isl_space_map_from_set(Space); 3048 isl_map *Identity = isl_map_identity(Space); 3049 Maps = isl_union_map_add_map(Maps, Identity); 3050 } 3051 3052 return Maps; 3053 } 3054 3055 /// Create a default-initialized PPCG GPU program. 3056 /// 3057 /// @returns A new gpu program description. 3058 gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) { 3059 3060 if (!PPCGScop) 3061 return nullptr; 3062 3063 auto PPCGProg = isl_calloc_type(S->getIslCtx(), struct gpu_prog); 3064 3065 PPCGProg->ctx = S->getIslCtx(); 3066 PPCGProg->scop = PPCGScop; 3067 PPCGProg->context = isl_set_copy(PPCGScop->context); 3068 PPCGProg->read = isl_union_map_copy(PPCGScop->reads); 3069 PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes); 3070 PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes); 3071 PPCGProg->tagged_must_kill = 3072 isl_union_map_copy(PPCGScop->tagged_must_kills); 3073 PPCGProg->to_inner = getArrayIdentity(); 3074 PPCGProg->to_outer = getArrayIdentity(); 3075 // TODO: verify that this assignment is correct. 3076 PPCGProg->any_to_outer = nullptr; 3077 PPCGProg->n_stmts = std::distance(S->begin(), S->end()); 3078 PPCGProg->stmts = getStatements(); 3079 3080 // Only consider arrays that have a non-empty extent. 3081 // Otherwise, this will cause us to consider the following kinds of 3082 // empty arrays: 3083 // 1. Invariant loads that are represented by SAI objects. 3084 // 2. Arrays with statically known zero size. 3085 auto ValidSAIsRange = 3086 make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool { 3087 return !getExtent(SAI).is_empty(); 3088 }); 3089 SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(), 3090 ValidSAIsRange.end()); 3091 3092 PPCGProg->n_array = 3093 ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end()); 3094 PPCGProg->array = isl_calloc_array(S->getIslCtx(), struct gpu_array_info, 3095 PPCGProg->n_array); 3096 3097 createArrays(PPCGProg, ValidSAIs); 3098 3099 PPCGProg->array_order = nullptr; 3100 collect_order_dependences(PPCGProg); 3101 3102 PPCGProg->may_persist = compute_may_persist(PPCGProg); 3103 return PPCGProg; 3104 } 3105 3106 struct PrintGPUUserData { 3107 struct cuda_info *CudaInfo; 3108 struct gpu_prog *PPCGProg; 3109 std::vector<ppcg_kernel *> Kernels; 3110 }; 3111 3112 /// Print a user statement node in the host code. 3113 /// 3114 /// We use ppcg's printing facilities to print the actual statement and 3115 /// additionally build up a list of all kernels that are encountered in the 3116 /// host ast. 3117 /// 3118 /// @param P The printer to print to 3119 /// @param Options The printing options to use 3120 /// @param Node The node to print 3121 /// @param User A user pointer to carry additional data. This pointer is 3122 /// expected to be of type PrintGPUUserData. 3123 /// 3124 /// @returns A printer to which the output has been printed. 
3125 static __isl_give isl_printer * 3126 printHostUser(__isl_take isl_printer *P, 3127 __isl_take isl_ast_print_options *Options, 3128 __isl_take isl_ast_node *Node, void *User) { 3129 auto Data = (struct PrintGPUUserData *)User; 3130 auto Id = isl_ast_node_get_annotation(Node); 3131 3132 if (Id) { 3133 bool IsUser = !strcmp(isl_id_get_name(Id), "user"); 3134 3135 // If this is a user statement, format it ourselves as ppcg would 3136 // otherwise try to call pet functionality that is not available in 3137 // Polly. 3138 if (IsUser) { 3139 P = isl_printer_start_line(P); 3140 P = isl_printer_print_ast_node(P, Node); 3141 P = isl_printer_end_line(P); 3142 isl_id_free(Id); 3143 isl_ast_print_options_free(Options); 3144 return P; 3145 } 3146 3147 auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id); 3148 isl_id_free(Id); 3149 Data->Kernels.push_back(Kernel); 3150 } 3151 3152 return print_host_user(P, Options, Node, User); 3153 } 3154 3155 /// Print C code corresponding to the control flow in @p Kernel. 3156 /// 3157 /// @param Kernel The kernel to print 3158 void printKernel(ppcg_kernel *Kernel) { 3159 auto *P = isl_printer_to_str(S->getIslCtx()); 3160 P = isl_printer_set_output_format(P, ISL_FORMAT_C); 3161 auto *Options = isl_ast_print_options_alloc(S->getIslCtx()); 3162 P = isl_ast_node_print(Kernel->tree, P, Options); 3163 char *String = isl_printer_get_str(P); 3164 printf("%s\n", String); 3165 free(String); 3166 isl_printer_free(P); 3167 } 3168 3169 /// Print C code corresponding to the GPU code described by @p Tree. 3170 /// 3171 /// @param Tree An AST describing GPU code 3172 /// @param PPCGProg The PPCG program from which @Tree has been constructed. 3173 void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) { 3174 auto *P = isl_printer_to_str(S->getIslCtx()); 3175 P = isl_printer_set_output_format(P, ISL_FORMAT_C); 3176 3177 PrintGPUUserData Data; 3178 Data.PPCGProg = PPCGProg; 3179 3180 auto *Options = isl_ast_print_options_alloc(S->getIslCtx()); 3181 Options = 3182 isl_ast_print_options_set_print_user(Options, printHostUser, &Data); 3183 P = isl_ast_node_print(Tree, P, Options); 3184 char *String = isl_printer_get_str(P); 3185 printf("# host\n"); 3186 printf("%s\n", String); 3187 free(String); 3188 isl_printer_free(P); 3189 3190 for (auto Kernel : Data.Kernels) { 3191 printf("# kernel%d\n", Kernel->id); 3192 printKernel(Kernel); 3193 } 3194 } 3195 3196 // Generate a GPU program using PPCG. 3197 // 3198 // GPU mapping consists of multiple steps: 3199 // 3200 // 1) Compute new schedule for the program. 3201 // 2) Map schedule to GPU (TODO) 3202 // 3) Generate code for new schedule (TODO) 3203 // 3204 // We do not use here the Polly ScheduleOptimizer, as the schedule optimizer 3205 // is mostly CPU specific. Instead, we use PPCG's GPU code generation 3206 // strategy directly from this pass. 3207 gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) { 3208 3209 auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen); 3210 3211 PPCGGen->ctx = S->getIslCtx(); 3212 PPCGGen->options = PPCGScop->options; 3213 PPCGGen->print = nullptr; 3214 PPCGGen->print_user = nullptr; 3215 PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt; 3216 PPCGGen->prog = PPCGProg; 3217 PPCGGen->tree = nullptr; 3218 PPCGGen->types.n = 0; 3219 PPCGGen->types.name = nullptr; 3220 PPCGGen->sizes = nullptr; 3221 PPCGGen->used_sizes = nullptr; 3222 PPCGGen->kernel_id = 0; 3223 3224 // Set scheduling strategy to same strategy PPCG is using. 
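// Roughly: outer_coincidence prefers schedules whose outermost band members
// are coincident (parallel, as GPU mapping requires), maximize_band_depth
// builds deeper permutable bands (more tiling opportunities), and disabling
// whole_component lets isl schedule strongly connected components
// incrementally, as PPCG does.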
  /// Generate a GPU program using PPCG.
  ///
  /// GPU mapping consists of multiple steps:
  ///
  ///  1) Compute a new schedule for the program.
  ///  2) Map the schedule to the GPU.
  ///  3) Generate code for the new schedule.
  ///
  /// We do not use Polly's ScheduleOptimizer here, as it is mostly CPU
  /// specific. Instead, we use PPCG's GPU code generation strategy directly
  /// from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set the isl scheduling strategy to the same strategy PPCG uses.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    Schedule =
        isl_schedule_align_params(Schedule, S->getFullParamSpace().release());

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
      DEBUG(dbgs() << getUniqueScopName(S)
                   << " does not have permutable bands. Bailing out\n";);
    } else {
      const bool CreateTransferToFromDevice = !PollyManagedMemory;
      Schedule = map_to_device(PPCGGen, Schedule, CreateTransferToFromDevice);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      // isl_printer_get_str() hands us a malloc'ed copy that we must free.
      char *ScheduleStr = isl_printer_get_str(P);
      printf("%s\n", ScheduleStr);
      free(ScheduleStr);
      isl_printer_free(P);
    }

    if (DumpCode) {
      printf("Code\n");
      printf("====\n");
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        printf("No code generated\n");
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }
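  // The schedule and code dumps above are driven by the -polly-acc-dump-*
  // flags; a typical invocation could look as follows (sketch only; the
  // exact opt/clang setup depends on how Polly is loaded):
  //
  //   opt -polly-codegen-ppcg -polly-acc-dump-schedule \
  //       -polly-acc-dump-code input.ll -disable-output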
  /// Free a gpu_gen structure.
  ///
  /// @param PPCGGen The gpu_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg does not free these options for us. To avoid leaks we free them
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }

  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the
  /// number of points in an isl set through the rectangular hull surrounding
  /// this set.
  ///
  /// @param Set   The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

    for (long i = 0, n = isl_set_dim(Set, isl_dim_set); i < n; i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }

  /// Approximate the number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt  The statement for which to compute the number of dynamic
  ///              instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of dynamic instructions
  ///          executed by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }

  /// Approximate the number of dynamic instructions executed in @p S.
  ///
  /// @param S     The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of dynamic instructions
  ///          executed in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }
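  // A small worked example of the approximation above (domain and
  // instruction count purely hypothetical): for a statement with domain
  //
  //   [n] -> { Stmt[i, j] : 0 <= i < n and 0 <= j < 32 }
  //
  // approxPointsInSet() computes, per dimension, max - min + 1, i.e.
  // ((n - 1) - 0 + 1) * (31 - 0 + 1) = n * 32 points. If the statement's
  // basic block holds 5 instructions, approxDynamicInst() returns the
  // expression 5 * (n * 32), and getNumberOfIterations() sums such terms
  // over all statements of the scop.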
  /// Create a check that ensures sufficient compute in the scop.
  ///
  /// @param S     The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }

  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice))
        continue;

      // Look for a function-pointer type (<func-type>*) among the operands
      // of Inst.
      for (Value *Op : Inst.operands())
        if (auto PtrTy = dyn_cast<PointerType>(Op->getType())) {
          if (isa<FunctionType>(PtrTy->getElementType())) {
            DEBUG(dbgs() << Inst
                         << " has illegal use of function in kernel.\n");
            return true;
          }
        }
    }
    return false;
  }

  /// Return whether the Scop @p S uses functions in a way that we do not
  /// support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }
    return false;
  }
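  // To illustrate the check above (IR and names purely illustrative, typed
  // pointers as used in this code base): a block containing only
  //
  //   %v = call double @llvm.sqrt.f64(double %x)
  //
  // calls a function we can support in a kernel, whereas a block that takes
  // a function's address, e.g.
  //
  //   store void ()* @callback, void ()** %slot
  //
  // has an operand of function-pointer type and therefore invalidates the
  // scop for kernel generation.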
  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator);

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important, as the
    // conditional branch guards the original scop from new induction
    // variables that the SCEVExpander may introduce while code generating
    // the parameters, which could introduce scalar dependences that prevent
    // us from correctly code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM.
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);

    // Preload invariant loads. Note: This should happen before the RTC,
    // because the RTC may depend on values that are invariant load hoisted.
    if (!NodeBuilder.preloadInvariantLoads()) {
      // Patch the introduced branch condition to ensure that we always
      // execute the original SCoP.
      auto *FalseI1 = Builder.getFalse();
      auto *SplitBBTerm = Builder.GetInsertBlock()->getTerminator();
      SplitBBTerm->setOperand(0, FalseI1);

      DEBUG(dbgs() << "preloading invariant loads failed in function: " +
                          S->getFunction().getName() +
                          " | Scop Region: " + S->getNameStr());

      // Adjust the dominator tree accordingly.
      auto *ExitingBlock = StartBlock->getUniqueSuccessor();
      assert(ExitingBlock);
      auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
      assert(MergeBlock);
      polly::markBlockUnreachable(*StartBlock, Builder);
      polly::markBlockUnreachable(*ExitingBlock, Builder);
      auto *ExitingBB = S->getExitingBlock();
      assert(ExitingBB);

      DT->changeImmediateDominator(MergeBlock, ExitingBB);
      DT->eraseNode(ExitingBlock);
      isl_ast_expr_free(Condition);
      isl_ast_node_free(Root);
    } else {

      if (polly::PerfMonitoring) {
        PerfMonitor P(*S, EnteringBB->getParent()->getParent());
        P.initialize();
        P.insertRegionStart(SplitBlock->getTerminator());

        // TODO: Check whether this is the correct exiting block to place
        //       the `end` performance marker. Invariant load hoisting
        //       changes the CFG in a way that I do not precisely understand,
        //       so I (Siddharth<[email protected]>) should come back to this
        //       and think about which exiting block to use.
        auto *ExitingBlock = StartBlock->getUniqueSuccessor();
        assert(ExitingBlock);
        BasicBlock *MergeBlock = ExitingBlock->getUniqueSuccessor();
        P.insertRegionEnd(MergeBlock->getTerminator());
      }

      NodeBuilder.addParameters(S->getContext().release());
      Value *RTC = NodeBuilder.createRTC(Condition);
      Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

      Builder.SetInsertPoint(&*StartBlock->begin());

      NodeBuilder.create(Root);
    }

    // In case a sequential kernel has more surrounding loops than any
    // parallel kernel, the SCoP is probably mostly sequential. Hence, there
    // is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }
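  // For orientation, the control flow emitted by generateCode() is roughly
  // (pseudo-IR, block names illustrative):
  //
  //   SplitBlock:
  //     br i1 (RTC && sufficient-compute), label %StartBlock,
  //                                        label %OriginalScop
  //
  //   StartBlock:    ; GPU path: allocations, transfers, kernel launches
  //   OriginalScop:  ; unmodified host version of the SCoP
  //
  // with both paths joining again in a common merge block. The final checks
  // above flip the branch back to the host version whenever GPU execution is
  // unprofitable or code generation failed.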
  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    DEBUG(dbgs() << "PPCGCodeGen running on: " << getUniqueScopName(S)
                 << " | loop depth: " << S->getMaxLoopDepth() << "\n");

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation would need to offload function calls to
    // the kernel. This may lead to a kernel trying to call a function on the
    // host. This check also prevents codegen from trying to take the address
    // of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      DEBUG(
          dbgs() << getUniqueScopName(S)
                 << " contains a function that cannot be materialized in a "
                    "GPU kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    } else {
      DEBUG(dbgs() << getUniqueScopName(S)
                   << " has an empty PPCGGen->tree. Bailing out.\n");
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }

  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ScopPass::getAnalysisUsage(AU);

    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    //        region tree.
  }
};
} // namespace

char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)
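// Illustrative creation of this pass from a tool (sketch only; assumes the
// legacy pass manager and the GPUArch/GPURuntime enums declared in
// polly/CodeGen/PPCGCodeGeneration.h):
//
//   llvm::legacy::PassManager PM;
//   PM.add(polly::createPPCGCodeGenerationPass(GPUArch::NVPTX64,
//                                              GPURuntime::CUDA));
//   PM.run(*M);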