//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"

#include "isl/union_map.h"

extern "C" {
#include "ppcg/cuda.h"
#include "ppcg/gpu.h"
#include "ppcg/gpu_print.h"
#include "ppcg/ppcg.h"
#include "ppcg/schedule.h"
}

#include "llvm/Support/Debug.h"

using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback to generate the ast expressions for each of
/// the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, isl_ast_build *Build,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  isl_ctx *Ctx;

  if (!Stmt || !Build)
    return NULL;

  Ctx = isl_ast_build_get_ctx(Build);
  isl_id_to_ast_expr *RefToExpr = isl_id_to_ast_expr_alloc(Ctx, 0);

  for (MemoryAccess *Acc : *Stmt) {
    isl_map *AddrFunc = Acc->getAddressFunction();
    AddrFunc = isl_map_intersect_domain(AddrFunc, Stmt->getDomain());
    isl_id *RefId = Acc->getId();
    isl_pw_multi_aff *PMA = isl_pw_multi_aff_from_map(AddrFunc);
    isl_multi_pw_aff *MPA = isl_multi_pw_aff_from_pw_multi_aff(PMA);
    MPA = isl_multi_pw_aff_coalesce(MPA);
    MPA = FunctionIndex(MPA, RefId, UserIndex);
    isl_ast_expr *Access = isl_ast_build_access_from_multi_pw_aff(Build, MPA);
    Access = FunctionExpr(Access, RefId, UserExpr);
    RefToExpr = isl_id_to_ast_expr_set(RefToExpr, RefId, Access);
  }

  return RefToExpr;
}

/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the existing IslNodeBuilder, which generates
/// code for general-purpose AST nodes, with special functionality for
/// generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator, Pass *P,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, gpu_prog *Prog)
      : IslNodeBuilder(Builder, Annotator, P, DL, LI, SE, DT, S), Prog(Prog) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

private:
  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// Class to free isl_ids.
  class IslIdDeleter {
  public:
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;

  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer (TODO)
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  virtual void createUser(__isl_take isl_ast_node *UserStmt);

  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values
  ///
  /// @returns A set of values referenced by the kernel.
  SetVector<Value *> getReferencesInKernel(ppcg_kernel *Kernel);

  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);

  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
  /// dump its IR to stderr.
  void finalizeKernelFunction();
};

/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}

void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  const char *Str = isl_id_get_name(Id);
  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "to_device") || isPrefix(Str, "from_device")) {
    // TODO: Insert memory copies
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    // TODO: Create kernel copy stmt
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
  return;
}

void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
                                    ppcg_kernel_stmt *KernelStmt) {
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;

  LoopToScevMapT LTS;
  LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());

  createSubstitutions(Expr, Stmt, LTS);

  if (Stmt->isBlockStmt())
    BlockGen.copyStmt(*Stmt, LTS, Indexes);
  else
    assert(0 && "Region statement not supported\n");
}

void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  auto *Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
  Builder.CreateCall(Sync, {});
}

/// Collect llvm::Values referenced from @p Node
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User);

  return isl_bool_true;
}

SetVector<Value *> GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator()};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs)
    findValues(Expr, SE, SubtreeValues);

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI.second->getBasePtr());

  isl_space *Space = S.getParamSpace();
  for (long i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  isl_space_free(Space);

  for (long i = 0; i < isl_space_dim(Kernel->space, isl_dim_set); i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  return SubtreeValues;
}

void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
  isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
  ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
  isl_id_free(Id);
  isl_ast_node_free(KernelStmt);

  SetVector<Value *> SubtreeValues = getReferencesInKernel(Kernel);

  assert(Kernel->tree && "Device AST of kernel node is empty");

  Instruction &HostInsertPoint = *Builder.GetInsertPoint();
  IslExprBuilder::IDToValueTy HostIDs = IDToValue;
  ValueMapT HostValueMap = ValueMap;

  SetVector<const Loop *> Loops;

  // For each loop we depend on, create a value that contains the current loop
  // iteration. These values are necessary to generate code for SCEVs that
  // depend on such loops. As a result, we need to pass them to the
  // subfunction.
  for (const Loop *L : Loops) {
    const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
                                            SE.getUnknown(Builder.getInt64(1)),
                                            L, SCEV::FlagAnyWrap);
    Value *V = generateSCEV(OuterLIV);
    OutsideLoopIterations[L] = SE.getUnknown(V);
    SubtreeValues.insert(V);
  }

  createKernelFunction(Kernel, SubtreeValues);

  create(isl_ast_node_copy(Kernel->tree));

  Builder.SetInsertPoint(&HostInsertPoint);
  IDToValue = HostIDs;

  ValueMap = HostValueMap;
  ScalarMap.clear();
  PHIOpMap.clear();
  EscapeMap.clear();
  IDToSAI.clear();

  finalizeKernelFunction();
}

/// Compute the DataLayout string for the NVPTX backend.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeNVPTXDataLayout(bool is64Bit) {
  std::string Ret = "e";

  if (!is64Bit)
    Ret += "-p:32:32";

  Ret += "-i64:64-v16:16-v32:32-n16:32:64";

  return Ret;
}

Function *
GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
                                         SetVector<Value *> &SubtreeValues) {
  std::vector<Type *> Args;
  std::string Identifier = "kernel_" + std::to_string(Kernel->id);

  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Args.push_back(Builder.getInt8PtrTy());
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++)
    Args.push_back(Builder.getInt64Ty());

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++)
    Args.push_back(Builder.getInt64Ty());

  for (auto *V : SubtreeValues)
    Args.push_back(V->getType());

  auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
  auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
                              GPUModule.get());
  FN->setCallingConv(CallingConv::PTX_Kernel);

  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Arg->setName(Kernel->array[i].array->name);

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl_id_copy(Id));
    Type *EleTy = SAI->getElementType();
    Value *Val = &*Arg;
    SmallVector<const SCEV *, 4> Sizes;
    isl_ast_build *Build =
        isl_ast_build_from_context(isl_set_copy(Prog->context));
    for (long j = 1; j < Kernel->array[i].array->n_index; j++) {
      isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
          Build, isl_pw_aff_copy(Kernel->array[i].array->bound[j]));
      auto V = ExprBuilder.create(DimSize);
      Sizes.push_back(SE.getSCEV(V));
    }
    const ScopArrayInfo *SAIRep =
        S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, ScopArrayInfo::MK_Array);

    isl_ast_build_free(Build);
    IDToSAI[Id] = SAIRep;
    isl_id_free(Id);
    Arg++;
  }

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (auto *V : SubtreeValues) {
    Arg->setName(V->getName());
    ValueMap[V] = &*Arg;
    Arg++;
  }

  return FN;
}

void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[] = {Intrinsic::nvvm_read_ptx_sreg_ctaid_x,
                                   Intrinsic::nvvm_read_ptx_sreg_ctaid_y};

  Intrinsic::ID IntrinsicsTID[] = {Intrinsic::nvvm_read_ptx_sreg_tid_x,
                                   Intrinsic::nvvm_read_ptx_sreg_tid_y,
                                   Intrinsic::nvvm_read_ptx_sreg_tid_z};

  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}

void GPUNodeBuilder::createKernelFunction(ppcg_kernel *Kernel,
                                          SetVector<Value *> &SubtreeValues) {

  std::string Identifier = "kernel_" + std::to_string(Kernel->id);
  GPUModule.reset(new Module(Identifier, Builder.getContext()));
  GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
  GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));

  Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);

  BasicBlock *PrevBlock = Builder.GetInsertBlock();
  auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);

  DominatorTree &DT = P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DT.addNewBlock(EntryBlock, PrevBlock);

  Builder.SetInsertPoint(EntryBlock);
  Builder.CreateRetVoid();
  Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());

  insertKernelIntrinsics(Kernel);
}

void GPUNodeBuilder::finalizeKernelFunction() {

  if (DumpKernelIR)
    outs() << *GPUModule << "\n";

  GPUModule.reset();
  KernelIDs.clear();
}

namespace {
class PPCGCodeGeneration : public ScopPass {
public:
  static char ID;

  /// The scop that is currently processed.
  Scop *S;

  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const DataLayout *DL;
  RegionInfo *RI;

  PPCGCodeGeneration() : ScopPass(ID) {}

  /// Construct compilation options for PPCG.
  ///
  /// @returns The compilation options.
  ppcg_options *createPPCGOptions() {
    auto DebugOptions =
        (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
    auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));

    DebugOptions->dump_schedule_constraints = false;
    DebugOptions->dump_schedule = false;
    DebugOptions->dump_final_schedule = false;
    DebugOptions->dump_sizes = false;

    Options->debug = DebugOptions;

    Options->reschedule = true;
    Options->scale_tile_loops = false;
    Options->wrap = false;

    Options->non_negative_parameters = false;
    Options->ctx = nullptr;
    Options->sizes = nullptr;

    Options->tile_size = 32;

    Options->use_private_memory = false;
    Options->use_shared_memory = false;
    Options->max_shared_memory = 0;

    Options->target = PPCG_TARGET_CUDA;
    Options->openmp = false;
    Options->linearize_device_arrays = true;
    Options->live_range_reordering = false;

    Options->opencl_compiler_options = nullptr;
    Options->opencl_use_gpu = false;
    Options->opencl_n_include_file = 0;
    Options->opencl_include_files = nullptr;
    Options->opencl_print_kernel_types = false;
    Options->opencl_embed_kernel_code = false;

    Options->save_schedule_file = nullptr;
    Options->load_schedule_file = nullptr;

    return Options;
  }

  /// Get a tagged access relation containing all accesses of type @p AccessTy.
  ///
  /// Instead of a normal access of the form:
  ///
  ///   Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// a tagged access has the form
  ///
  ///   [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// where 'id' is an additional space that references the memory access that
  /// triggered the access.
  ///
  /// @param AccessTy The type of the memory accesses to collect.
  ///
  /// @return The relation describing all tagged memory accesses.
  isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
    isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace());

    for (auto &Stmt : *S)
      for (auto &Acc : Stmt)
        if (Acc->getType() == AccessTy) {
          isl_map *Relation = Acc->getAccessRelation();
          Relation = isl_map_intersect_domain(Relation, Stmt.getDomain());

          isl_space *Space = isl_map_get_space(Relation);
          Space = isl_space_range(Space);
          Space = isl_space_from_range(Space);
          Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId());
          isl_map *Universe = isl_map_universe(Space);
          Relation = isl_map_domain_product(Relation, Universe);
          Accesses = isl_union_map_add_map(Accesses, Relation);
        }

    return Accesses;
  }

  /// Get the set of all read accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedReads() {
    return getTaggedAccesses(MemoryAccess::READ);
  }

  /// Get the set of all may (and must) write accesses, tagged with the
  /// access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMayWrites() {
    return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
                               getTaggedAccesses(MemoryAccess::MUST_WRITE));
  }

  /// Get the set of all must-write accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMustWrites() {
    return getTaggedAccesses(MemoryAccess::MUST_WRITE);
  }

  /// Collect parameter and array names as isl_ids.
  ///
  /// To reason about the different parameters and arrays used, ppcg requires
  /// a list of all isl_ids in use. As PPCG traditionally performs
  /// source-to-source compilation, each of these isl_ids is mapped to the
  /// expression that represents it. As we do not have a corresponding
  /// expression in Polly, we just map each id to a 'zero' expression to match
  /// the data format that ppcg expects.
  ///
  /// @returns A map from collected ids to 'zero' ast expressions.
  __isl_give isl_id_to_ast_expr *getNames() {
    auto *Names = isl_id_to_ast_expr_alloc(
        S->getIslCtx(),
        S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
    auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx()));
    auto *Space = S->getParamSpace();

    for (int I = 0, E = S->getNumParams(); I < E; ++I) {
      isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, I);
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    for (auto &Array : S->arrays()) {
      auto Id = Array.second->getBasePtrId();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    isl_space_free(Space);
    isl_ast_expr_free(Zero);

    return Names;
  }

  /// Create a new PPCG scop from the current scop.
  ///
  /// The PPCG scop is initialized with data from the current polly::Scop. From
  /// this initial data, the data-dependences in the PPCG scop are initialized.
  /// We do not use Polly's dependence analysis for now, to ensure we match
  /// the PPCG default behaviour more closely.
  ///
  /// @returns A new ppcg scop.
  ppcg_scop *createPPCGScop() {
    auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));

    PPCGScop->options = createPPCGOptions();

    PPCGScop->start = 0;
    PPCGScop->end = 0;

    PPCGScop->context = S->getContext();
    PPCGScop->domain = S->getDomains();
    PPCGScop->call = nullptr;
    PPCGScop->tagged_reads = getTaggedReads();
    PPCGScop->reads = S->getReads();
    PPCGScop->live_in = nullptr;
    PPCGScop->tagged_may_writes = getTaggedMayWrites();
    PPCGScop->may_writes = S->getWrites();
    PPCGScop->tagged_must_writes = getTaggedMustWrites();
    PPCGScop->must_writes = S->getMustWrites();
    PPCGScop->live_out = nullptr;
    PPCGScop->tagged_must_kills = isl_union_map_empty(S->getParamSpace());
    PPCGScop->tagger = nullptr;

    PPCGScop->independence = nullptr;
    PPCGScop->dep_flow = nullptr;
    PPCGScop->tagged_dep_flow = nullptr;
    PPCGScop->dep_false = nullptr;
    PPCGScop->dep_forced = nullptr;
    PPCGScop->dep_order = nullptr;
    PPCGScop->tagged_dep_order = nullptr;

    PPCGScop->schedule = S->getScheduleTree();
    PPCGScop->names = getNames();

    PPCGScop->pet = nullptr;

    compute_tagger(PPCGScop);
    compute_dependences(PPCGScop);

    return PPCGScop;
  }

  /// Collect the array accesses in a statement.
  ///
  /// @param Stmt The statement for which to collect the accesses.
  ///
  /// @returns A list of array accesses.
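  ///
  /// Note that each access is prepended to the head of the list, so the
  /// returned list is in reverse order of the statement's access list.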
  gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
    gpu_stmt_access *Accesses = nullptr;

    for (MemoryAccess *Acc : Stmt) {
      auto Access = isl_alloc_type(S->getIslCtx(), struct gpu_stmt_access);
      Access->read = Acc->isRead();
      Access->write = Acc->isWrite();
      Access->access = Acc->getAccessRelation();
      isl_space *Space = isl_map_get_space(Access->access);
      Space = isl_space_range(Space);
      Space = isl_space_from_range(Space);
      Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId());
      isl_map *Universe = isl_map_universe(Space);
      Access->tagged_access =
          isl_map_domain_product(Acc->getAccessRelation(), Universe);
      Access->exact_write = Acc->isWrite();
      Access->ref_id = Acc->getId();
      Access->next = Accesses;
      Accesses = Access;
    }

    return Accesses;
  }

  /// Collect the list of GPU statements.
  ///
  /// Each statement has an id, a pointer to the underlying data structure,
  /// as well as a list with all memory accesses.
  ///
  /// @returns A linked-list of statements.
  gpu_stmt *getStatements() {
    gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx(), struct gpu_stmt,
                                       std::distance(S->begin(), S->end()));

    int i = 0;
    for (auto &Stmt : *S) {
      gpu_stmt *GPUStmt = &Stmts[i];

      GPUStmt->id = Stmt.getDomainId();

      // We use the pet stmt pointer to keep track of the Polly statements.
      GPUStmt->stmt = (pet_stmt *)&Stmt;
      GPUStmt->accesses = getStmtAccesses(Stmt);
      i++;
    }

    return Stmts;
  }

  /// Derive the extent of an array.
  ///
  /// The extent of an array is defined by the set of memory locations for
  /// which a memory access in the iteration domain exists.
  ///
  /// @param Array The array to derive the extent for.
  ///
  /// @returns An isl_set describing the extent of the array.
  __isl_give isl_set *getExtent(ScopArrayInfo *Array) {
    isl_union_map *Accesses = S->getAccesses();
    Accesses = isl_union_map_intersect_domain(Accesses, S->getDomains());
    isl_union_set *AccessUSet = isl_union_map_range(Accesses);
    isl_set *AccessSet =
        isl_union_set_extract_set(AccessUSet, Array->getSpace());
    isl_union_set_free(AccessUSet);

    return AccessSet;
  }

  /// Derive the bounds of an array.
  ///
  /// For the first dimension we derive the bound of the array from the extent
  /// of this dimension. For inner dimensions we obtain their size directly
  /// from ScopArrayInfo.
  ///
  /// @param PPCGArray The array to compute bounds for.
  /// @param Array The polly array from which to take the information.
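  ///
  /// As an illustration: for a two-dimensional array accessed as A[i][j], the
  /// bound of the outermost dimension is one plus the maximal value of the
  /// first index occurring in the extent, while the bound of the inner
  /// dimension is the size recorded in the ScopArrayInfo (e.g. N for an array
  /// declared as float A[][N]).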
  void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
    if (PPCGArray.n_index > 0) {
      isl_set *Dom = isl_set_copy(PPCGArray.extent);
      Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
      isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
      isl_set_free(Dom);
      Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
      isl_local_space *LS = isl_local_space_from_space(isl_set_get_space(Dom));
      isl_aff *One = isl_aff_zero_on_domain(LS);
      One = isl_aff_add_constant_si(One, 1);
      Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
      Bound = isl_pw_aff_gist(Bound, S->getContext());
      PPCGArray.bound[0] = Bound;
    }

    for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
      isl_pw_aff *Bound = Array->getDimensionSizePw(i);
      auto LS = isl_pw_aff_get_domain_space(Bound);
      auto Aff = isl_multi_aff_zero(LS);
      Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
      PPCGArray.bound[i] = Bound;
    }
  }

  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg The program to compute the arrays for.
  void createArrays(gpu_prog *PPCGProg) {
    int i = 0;
    for (auto &Element : S->arrays()) {
      ScopArrayInfo *Array = Element.second.get();

      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = Array->getElementType()->getPrimitiveSizeInBits() / 8;
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.extent = nullptr;
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.bound =
          isl_alloc_array(S->getIslCtx(), isl_pw_aff *, PPCGArray.n_index);
      PPCGArray.extent = getExtent(Array);
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar = false;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;

      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
    }
  }

  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace());

    for (auto &Item : S->arrays()) {
      ScopArrayInfo *Array = Item.second.get();
      isl_space *Space = Array->getSpace();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }

  /// Create a default-initialized PPCG GPU program.
  ///
  /// @returns A new GPU program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    if (!PPCGScop)
      return nullptr;

    auto PPCGProg = isl_calloc_type(S->getIslCtx(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    PPCGProg->may_persist = compute_may_persist(PPCGProg);
    PPCGProg->any_to_outer = nullptr;
    PPCGProg->array_order = nullptr;
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();
    PPCGProg->n_array = std::distance(S->array_begin(), S->array_end());
    PPCGProg->array = isl_calloc_array(S->getIslCtx(), struct gpu_array_info,
                                       PPCGProg->n_array);

    createArrays(PPCGProg);

    return PPCGProg;
  }

  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P The printer to print to
  /// @param Options The printing options to use
  /// @param Node The node to print
  /// @param User A user pointer to carry additional data. This pointer is
  ///             expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);
        isl_id_free(Id);
        isl_ast_print_options_free(Options);
        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }

  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);
  }

  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree An AST describing GPU code
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
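  ///
  /// The output starts with a '# host' section containing the host-side code,
  /// followed by one '# kernel<id>' section for each kernel encountered while
  /// printing the host code.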
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("# host\n");
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      printf("# kernel%d\n", Kernel->id);
      printKernel(Kernel);
    }
  }

  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute new schedule for the program.
  //  2) Map schedule to GPU (TODO)
  //  3) Generate code for new schedule (TODO)
  //
  // We do not use the Polly ScheduleOptimizer here, as the schedule optimizer
  // is mostly CPU specific. Instead, we use PPCG's GPU code generation
  // strategy directly from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set the scheduling strategy to the same strategy PPCG uses.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
    } else {
      Schedule = map_to_device(PPCGGen, Schedule);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      printf("%s\n", isl_printer_get_str(P));
      isl_printer_free(P);
    }

    if (DumpCode) {
      printf("Code\n");
      printf("====\n");
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        printf("No code generated\n");
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }

  /// Free gpu_gen structure.
  ///
  /// @param PPCGGen The gpu_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us.
  /// To avoid leaks, we do this ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }

  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator);

    GPUNodeBuilder NodeBuilder(Builder, Annotator, this, *DL, *LI, *SE, *DT, *S,
                               Prog);

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important as the conditional
    // branch will guard the original scop from new induction variables that
    // the SCEVExpander may introduce while code generating the parameters and
    // which may introduce scalar dependences that prevent us from correctly
    // code generating this scop.
    BasicBlock *StartBlock =
        executeScopConditionally(*S, this, Builder.getTrue());

    // TODO: Handle LICM
    // TODO: Verify run-time checks
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());
    NodeBuilder.addParameters(S->getContext());
    Builder.SetInsertPoint(&*StartBlock->begin());
    NodeBuilder.create(Root);
    NodeBuilder.finalizeSCoP(*S);
  }

  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getParent()->getParent()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    // We currently do not support scops with invariant loads.
    if (S->hasInvariantAccesses())
      return false;

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree)
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }

  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetection>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<PostDominatorTreeWrapperPass>();
    AU.addPreserved<ScopDetection>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<SCEVAAWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    // region tree.
    AU.addPreserved<RegionInfoPass>();
    AU.addPreserved<ScopInfoRegionPass>();
  }
};
}

char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass() { return new PPCGCodeGeneration(); }

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetection);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)