//===------ CodeGeneration.cpp - Code generate the Scops. -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The CodeGeneration pass takes a Scop created by ScopInfo and translates it
// back to LLVM-IR using Cloog.
//
// The Scop describes the high level memory behaviour of a control flow region.
// Transformation passes can update the schedule (execution order) of
// statements in the Scop. Cloog is used to generate an abstract syntax tree
// (clast) that reflects the updated execution order. This clast is used to
// create new LLVM-IR that is computationally equivalent to the original
// control flow region, but executes its code in the new execution order
// defined by the changed scattering.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/Cloog.h"
#ifdef CLOOG_FOUND

#define DEBUG_TYPE "polly-codegen"
#include "polly/Dependences.h"
#include "polly/LinkAllPasses.h"
#include "polly/ScopInfo.h"
#include "polly/TempScopInfo.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/LoopGenerators.h"
#include "polly/CodeGen/PTXGenerator.h"
#include "polly/CodeGen/Utils.h"
#include "polly/Support/GICHelper.h"

#include "llvm/IR/Module.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define CLOOG_INT_GMP 1
#include "cloog/cloog.h"
#include "cloog/isl/cloog.h"

#include "isl/aff.h"

#include <vector>
#include <utility>

using namespace polly;
using namespace llvm;

struct isl_set;

namespace polly {
static cl::opt<bool>
OpenMP("enable-polly-openmp",
       cl::desc("Generate OpenMP parallel code"), cl::Hidden,
       cl::value_desc("OpenMP code generation enabled if true"),
       cl::init(false), cl::ZeroOrMore);

#ifdef GPU_CODEGEN
static cl::opt<bool>
GPGPU("enable-polly-gpgpu",
      cl::desc("Generate GPU parallel code"), cl::Hidden,
      cl::value_desc("GPGPU code generation enabled if true"),
      cl::init(false), cl::ZeroOrMore);

static cl::opt<std::string>
GPUTriple("polly-gpgpu-triple",
          cl::desc("Target triple for GPU code generation"),
          cl::Hidden, cl::init(""));
#endif /* GPU_CODEGEN */

typedef DenseMap<const char*, Value*> CharMapT;

/// Class to generate LLVM-IR that calculates the value of a clast_expr.
class ClastExpCodeGen {
  IRBuilder<> &Builder;
  const CharMapT &IVS;

  Value *codegen(const clast_name *e, Type *Ty);
  Value *codegen(const clast_term *e, Type *Ty);
  Value *codegen(const clast_binary *e, Type *Ty);
  Value *codegen(const clast_reduction *r, Type *Ty);

public:
  // A generator for clast expressions.
  //
  // @param B The IRBuilder that defines where the code to calculate the
  //          clast expressions should be inserted.
  // @param IVMap A map that translates strings describing the induction
  //              variables to the LLVM Values that represent these variables
  //              on the LLVM side.
  ClastExpCodeGen(IRBuilder<> &B, CharMapT &IVMap);

  // Generates code to calculate a given clast expression.
  //
  // @param e The expression to calculate.
  // @return The Value that holds the result.
  Value *codegen(const clast_expr *e, Type *Ty);
};

Value *ClastExpCodeGen::codegen(const clast_name *e, Type *Ty) {
  CharMapT::const_iterator I = IVS.find(e->name);

  assert(I != IVS.end() && "Clast name not found");

  return Builder.CreateSExtOrBitCast(I->second, Ty);
}

Value *ClastExpCodeGen::codegen(const clast_term *e, Type *Ty) {
  APInt a = APInt_from_MPZ(e->val);

  Value *ConstOne = ConstantInt::get(Builder.getContext(), a);
  ConstOne = Builder.CreateSExtOrBitCast(ConstOne, Ty);

  if (!e->var)
    return ConstOne;

  Value *var = codegen(e->var, Ty);
  return Builder.CreateMul(ConstOne, var);
}

Value *ClastExpCodeGen::codegen(const clast_binary *e, Type *Ty) {
  Value *LHS = codegen(e->LHS, Ty);

  APInt RHS_AP = APInt_from_MPZ(e->RHS);

  Value *RHS = ConstantInt::get(Builder.getContext(), RHS_AP);
  RHS = Builder.CreateSExtOrBitCast(RHS, Ty);

  switch (e->type) {
  case clast_bin_mod:
    return Builder.CreateSRem(LHS, RHS);
  case clast_bin_fdiv: {
    // floord(n,d): ((n < 0) ? (n - d + 1) : n) / d
    Value *One = ConstantInt::get(Ty, 1);
    Value *Zero = ConstantInt::get(Ty, 0);
    Value *Sum1 = Builder.CreateSub(LHS, RHS);
    Value *Sum2 = Builder.CreateAdd(Sum1, One);
    Value *isNegative = Builder.CreateICmpSLT(LHS, Zero);
    Value *Dividend = Builder.CreateSelect(isNegative, Sum2, LHS);
    return Builder.CreateSDiv(Dividend, RHS);
  }
  case clast_bin_cdiv: {
    // ceild(n,d): ((n < 0) ? n : (n + d - 1)) / d
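    // For example, ceild(7, 3) = (7 + 3 - 1) / 3 = 3, while for a negative
    // dividend ceild(-7, 3) = -7 / 3 = -2, because signed division already
    // truncates towards zero and therefore matches the ceiling.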
    Value *One = ConstantInt::get(Ty, 1);
    Value *Zero = ConstantInt::get(Ty, 0);
    Value *Sum1 = Builder.CreateAdd(LHS, RHS);
    Value *Sum2 = Builder.CreateSub(Sum1, One);
    Value *isNegative = Builder.CreateICmpSLT(LHS, Zero);
    Value *Dividend = Builder.CreateSelect(isNegative, LHS, Sum2);
    return Builder.CreateSDiv(Dividend, RHS);
  }
  case clast_bin_div:
    return Builder.CreateSDiv(LHS, RHS);
  }

  llvm_unreachable("Unknown clast binary expression type");
}

Value *ClastExpCodeGen::codegen(const clast_reduction *r, Type *Ty) {
  assert((r->type == clast_red_min || r->type == clast_red_max ||
          r->type == clast_red_sum) &&
         "Clast reduction type not supported");
  Value *old = codegen(r->elts[0], Ty);

  for (int i = 1; i < r->n; ++i) {
    Value *exprValue = codegen(r->elts[i], Ty);

    switch (r->type) {
    case clast_red_min: {
      Value *cmp = Builder.CreateICmpSLT(old, exprValue);
      old = Builder.CreateSelect(cmp, old, exprValue);
      break;
    }
    case clast_red_max: {
      Value *cmp = Builder.CreateICmpSGT(old, exprValue);
      old = Builder.CreateSelect(cmp, old, exprValue);
      break;
    }
    case clast_red_sum:
      old = Builder.CreateAdd(old, exprValue);
      break;
    }
  }

  return old;
}

ClastExpCodeGen::ClastExpCodeGen(IRBuilder<> &B, CharMapT &IVMap)
    : Builder(B), IVS(IVMap) {}

Value *ClastExpCodeGen::codegen(const clast_expr *e, Type *Ty) {
  switch (e->type) {
  case clast_expr_name:
    return codegen((const clast_name *)e, Ty);
  case clast_expr_term:
    return codegen((const clast_term *)e, Ty);
  case clast_expr_bin:
    return codegen((const clast_binary *)e, Ty);
  case clast_expr_red:
    return codegen((const clast_reduction *)e, Ty);
  }

  llvm_unreachable("Unknown clast expression!");
}

class ClastStmtCodeGen {
public:
  const std::vector<std::string> &getParallelLoops();

private:
  // The Scop we code generate.
  Scop *S;
  Pass *P;

  // The Builder specifies the current location to code generate at.
  IRBuilder<> &Builder;

  // Map the Values from the old code to their counterparts in the new code.
  ValueMapT ValueMap;

  // ClastVars maps from the textual representation of a clast variable to its
  // current *Value. clast variables are scheduling variables, original
  // induction variables or parameters. They are used either in loop bounds or
  // to define the statement instance that is executed.
  //
  //   for (s = 0; s < n + 3; ++s)
  //     for (t = s; t < m; ++t)
  //       Stmt(i = s + 3 * m, j = t);
  //
  // {s,t,i,j,n,m} is the set of clast variables in this clast.
  CharMapT ClastVars;

  // Code generator for clast expressions.
  ClastExpCodeGen ExpGen;

  // Do we currently generate parallel code?
  bool parallelCodeGeneration;

  std::vector<std::string> parallelLoops;

  void codegen(const clast_assignment *a);

  void codegen(const clast_assignment *a, ScopStmt *Statement,
               unsigned Dimension, int vectorDim,
               std::vector<ValueMapT> *VectorVMap = 0);

  void codegenSubstitutions(const clast_stmt *Assignment,
                            ScopStmt *Statement, int vectorDim = 0,
                            std::vector<ValueMapT> *VectorVMap = 0);

  void codegen(const clast_user_stmt *u, std::vector<Value*> *IVS = NULL,
               const char *iterator = NULL, isl_set *scatteringDomain = 0);

  void codegen(const clast_block *b);

  /// @brief Create a classical sequential loop.
  void codegenForSequential(const clast_for *f);

  /// @brief Create OpenMP structure values.
  ///
  /// Create a list of values that has to be stored into the OpenMP
  /// subfunction structure.
  SetVector<Value*> getOMPValues(const clast_stmt *Body);

  /// @brief Update ClastVars and ValueMap according to a value map.
  ///
  /// @param VMap A map from old to new values.
  void updateWithValueMap(OMPGenerator::ValueToValueMapTy &VMap);

  /// @brief Create an OpenMP parallel for loop.
  ///
  /// This loop reflects a loop as if it would have been created by an OpenMP
  /// statement.
  void codegenForOpenMP(const clast_for *f);

#ifdef GPU_CODEGEN
  /// @brief Create GPGPU device memory access values.
  ///
  /// Create a list of values that will be set to be parameters of the GPGPU
  /// subfunction. These parameters represent device memory base addresses
  /// and their size in bytes.
  SetVector<Value*> getGPUValues(unsigned &OutputBytes);

  /// @brief Create a GPU parallel for loop.
  ///
  /// This loop reflects a loop as if it would have been created by a GPU
  /// statement.
  void codegenForGPGPU(const clast_for *F);

  /// @brief Get the innermost for loop.
  const clast_stmt *getScheduleInfo(const clast_for *F,
                                    std::vector<int> &NumIters,
                                    unsigned &LoopDepth,
                                    unsigned &NonPLoopDepth);
#endif /* GPU_CODEGEN */

  /// @brief Check if a loop is parallel.
  ///
  /// Detect if a clast_for loop can be executed in parallel.
  ///
  /// @param For The clast for loop to check.
  ///
  /// @return Returns true if the incoming clast_for statement can be
  ///         executed in parallel.
  bool isParallelFor(const clast_for *For);

  bool isInnermostLoop(const clast_for *f);

  /// @brief Get the number of loop iterations for this loop.
  /// @param f The clast for loop to check.
  int getNumberOfIterations(const clast_for *f);

  /// @brief Create vector instructions for this loop.
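  ///
  /// Instead of emitting a scalar loop, one statement instance is created
  /// for every iteration and the loop's trip count is used as the vector
  /// width for the vector block generator.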
  void codegenForVector(const clast_for *f);

  void codegen(const clast_for *f);

  Value *codegen(const clast_equation *eq);

  void codegen(const clast_guard *g);

  void codegen(const clast_stmt *stmt);

  void addParameters(const CloogNames *names);

  IntegerType *getIntPtrTy();

public:
  void codegen(const clast_root *r);

  ClastStmtCodeGen(Scop *scop, IRBuilder<> &B, Pass *P);
};
}

IntegerType *ClastStmtCodeGen::getIntPtrTy() {
  return P->getAnalysis<DataLayout>().getIntPtrType(Builder.getContext());
}

const std::vector<std::string> &ClastStmtCodeGen::getParallelLoops() {
  return parallelLoops;
}

void ClastStmtCodeGen::codegen(const clast_assignment *a) {
  Value *V = ExpGen.codegen(a->RHS, getIntPtrTy());
  ClastVars[a->LHS] = V;
}

void ClastStmtCodeGen::codegen(const clast_assignment *A, ScopStmt *Stmt,
                               unsigned Dim, int VectorDim,
                               std::vector<ValueMapT> *VectorVMap) {
  const PHINode *PN;
  Value *RHS;

  assert(!A->LHS && "Statement assignments do not have left hand side");

  PN = Stmt->getInductionVariableForDimension(Dim);
  RHS = ExpGen.codegen(A->RHS, Builder.getInt64Ty());
  RHS = Builder.CreateTruncOrBitCast(RHS, PN->getType());

  if (VectorVMap)
    (*VectorVMap)[VectorDim][PN] = RHS;

  ValueMap[PN] = RHS;
}

void ClastStmtCodeGen::codegenSubstitutions(const clast_stmt *Assignment,
                                            ScopStmt *Statement, int vectorDim,
                                            std::vector<ValueMapT> *VectorVMap) {
  int Dimension = 0;

  while (Assignment) {
    assert(CLAST_STMT_IS_A(Assignment, stmt_ass) &&
           "Substitutions are expected to be assignments");
    codegen((const clast_assignment *)Assignment, Statement, Dimension,
            vectorDim, VectorVMap);
    Assignment = Assignment->next;
    Dimension++;
  }
}

// Takes the cloog specific domain and translates it into a map Statement ->
// PartialSchedule, where the PartialSchedule contains all the dimensions that
// have been code generated up to this point.
static __isl_give isl_map *extractPartialSchedule(ScopStmt *Statement,
                                                  isl_set *Domain) {
  isl_map *Schedule = Statement->getScattering();
  int ScheduledDimensions = isl_set_dim(Domain, isl_dim_set);
  int UnscheduledDimensions =
      isl_map_dim(Schedule, isl_dim_out) - ScheduledDimensions;

  return isl_map_project_out(Schedule, isl_dim_out, ScheduledDimensions,
                             UnscheduledDimensions);
}

void ClastStmtCodeGen::codegen(const clast_user_stmt *u,
                               std::vector<Value*> *IVS, const char *iterator,
                               isl_set *Domain) {
  ScopStmt *Statement = (ScopStmt *)u->statement->usr;

  if (u->substitutions)
    codegenSubstitutions(u->substitutions, Statement);

  int VectorDimensions = IVS ? IVS->size() : 1;

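  // Without a set of per-lane induction variable values only a single
  // (scalar) statement instance is generated.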
  if (VectorDimensions == 1) {
    BlockGenerator::generate(Builder, *Statement, ValueMap, P);
    return;
  }

  VectorValueMapT VectorMap(VectorDimensions);

  if (IVS) {
    assert(u->substitutions && "Substitutions expected!");
    int i = 0;
    for (std::vector<Value*>::iterator II = IVS->begin(), IE = IVS->end();
         II != IE; ++II) {
      ClastVars[iterator] = *II;
      codegenSubstitutions(u->substitutions, Statement, i, &VectorMap);
      i++;
    }
  }

  isl_map *Schedule = extractPartialSchedule(Statement, Domain);
  VectorBlockGenerator::generate(Builder, *Statement, VectorMap, Schedule, P);
  isl_map_free(Schedule);
}

void ClastStmtCodeGen::codegen(const clast_block *b) {
  if (b->body)
    codegen(b->body);
}

void ClastStmtCodeGen::codegenForSequential(const clast_for *f) {
  Value *LowerBound, *UpperBound, *IV, *Stride;
  BasicBlock *AfterBB;
  Type *IntPtrTy = getIntPtrTy();

  LowerBound = ExpGen.codegen(f->LB, IntPtrTy);
  UpperBound = ExpGen.codegen(f->UB, IntPtrTy);
  Stride = Builder.getInt(APInt_from_MPZ(f->stride));

  IV = createLoop(LowerBound, UpperBound, Stride, Builder, P, AfterBB,
                  CmpInst::ICMP_SLE);

  // Add loop iv to symbols.
  ClastVars[f->iterator] = IV;

  if (f->body)
    codegen(f->body);

  // Loop is finished, so remove its iv from the live symbols.
  ClastVars.erase(f->iterator);
  Builder.SetInsertPoint(AfterBB->begin());
}

// Helper class to determine all scalar parameters used in the basic blocks of
// a clast. Scalar parameters are scalar variables defined outside of the SCoP.
class ParameterVisitor : public ClastVisitor {
  std::set<Value *> Values;

public:
  ParameterVisitor() : ClastVisitor(), Values() {}

  void visitUser(const clast_user_stmt *Stmt) {
    const ScopStmt *S = static_cast<const ScopStmt *>(Stmt->statement->usr);
    const BasicBlock *BB = S->getBasicBlock();

    // Check all the operands of instructions in the basic block.
    for (BasicBlock::const_iterator BI = BB->begin(), BE = BB->end(); BI != BE;
         ++BI) {
      const Instruction &Inst = *BI;
      for (Instruction::const_op_iterator II = Inst.op_begin(),
           IE = Inst.op_end(); II != IE; ++II) {
        Value *SrcVal = *II;

        if (Instruction *OpInst = dyn_cast<Instruction>(SrcVal))
          if (S->getParent()->getRegion().contains(OpInst))
            continue;

        if (isa<Instruction>(SrcVal) || isa<Argument>(SrcVal))
          Values.insert(SrcVal);
      }
    }
  }

  // Iterator to iterate over the values found.
  typedef std::set<Value *>::const_iterator const_iterator;
  inline const_iterator begin() const { return Values.begin(); }
  inline const_iterator end() const { return Values.end(); }
};

SetVector<Value*> ClastStmtCodeGen::getOMPValues(const clast_stmt *Body) {
  SetVector<Value*> Values;

  // The clast variables
  for (CharMapT::iterator I = ClastVars.begin(), E = ClastVars.end();
       I != E; I++)
    Values.insert(I->second);

  // Find the temporaries that are referenced in the clast statements'
  // basic blocks but are not defined by these blocks (e.g., references
  // to function arguments or temporaries defined before the start of
  // the SCoP).
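  // Such values have to be available inside the OpenMP subfunction as well,
  // so they are added to the set of copy-in values.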
  ParameterVisitor Params;
  Params.visit(Body);

  for (ParameterVisitor::const_iterator PI = Params.begin(), PE = Params.end();
       PI != PE; ++PI) {
    Value *V = *PI;
    Values.insert(V);
    DEBUG(dbgs() << "Adding temporary for OMP copy-in: " << *V << "\n");
  }

  return Values;
}

void ClastStmtCodeGen::updateWithValueMap(
    OMPGenerator::ValueToValueMapTy &VMap) {
  std::set<Value*> Inserted;

  for (CharMapT::iterator I = ClastVars.begin(), E = ClastVars.end();
       I != E; I++) {
    ClastVars[I->first] = VMap[I->second];
    Inserted.insert(I->second);
  }

  for (OMPGenerator::ValueToValueMapTy::iterator I = VMap.begin(),
       E = VMap.end(); I != E; ++I) {
    if (Inserted.count(I->first))
      continue;

    ValueMap[I->first] = I->second;
  }
}

static void clearDomtree(Function *F, DominatorTree &DT) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock*> Nodes;
  for (po_iterator<DomTreeNode*> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (std::vector<BasicBlock*>::iterator I = Nodes.begin(), E = Nodes.end();
       I != E; ++I)
    DT.eraseNode(*I);
}

void ClastStmtCodeGen::codegenForOpenMP(const clast_for *For) {
  Value *Stride, *LB, *UB, *IV;
  BasicBlock::iterator LoopBody;
  IntegerType *IntPtrTy = getIntPtrTy();
  SetVector<Value*> Values;
  OMPGenerator::ValueToValueMapTy VMap;
  OMPGenerator OMPGen(Builder, P);

  Stride = Builder.getInt(APInt_from_MPZ(For->stride));
  Stride = Builder.CreateSExtOrBitCast(Stride, IntPtrTy);
  LB = ExpGen.codegen(For->LB, IntPtrTy);
  UB = ExpGen.codegen(For->UB, IntPtrTy);

  Values = getOMPValues(For->body);

  IV = OMPGen.createParallelLoop(LB, UB, Stride, Values, VMap, &LoopBody);
  BasicBlock::iterator AfterLoop = Builder.GetInsertPoint();
  Builder.SetInsertPoint(LoopBody);

  // Save the current values.
  const ValueMapT ValueMapCopy = ValueMap;
  const CharMapT ClastVarsCopy = ClastVars;

  updateWithValueMap(VMap);
  ClastVars[For->iterator] = IV;

  if (For->body)
    codegen(For->body);

  // Restore the original values.
  ValueMap = ValueMapCopy;
  ClastVars = ClastVarsCopy;

  clearDomtree((*LoopBody).getParent()->getParent(),
               P->getAnalysis<DominatorTree>());

  Builder.SetInsertPoint(AfterLoop);
}

#ifdef GPU_CODEGEN
static unsigned getArraySizeInBytes(const ArrayType *AT) {
  unsigned Bytes = AT->getNumElements();
  if (const ArrayType *T = dyn_cast<ArrayType>(AT->getElementType()))
    Bytes *= getArraySizeInBytes(T);
  else
    Bytes *= AT->getElementType()->getPrimitiveSizeInBits() / 8;

  return Bytes;
}

SetVector<Value*> ClastStmtCodeGen::getGPUValues(unsigned &OutputBytes) {
  SetVector<Value*> Values;
  OutputBytes = 0;

  // Record the memory reference base addresses.
  for (Scop::iterator SI = S->begin(), SE = S->end(); SI != SE; ++SI) {
    ScopStmt *Stmt = *SI;
    for (SmallVector<MemoryAccess*, 8>::iterator I = Stmt->memacc_begin(),
         E = Stmt->memacc_end(); I != E; ++I) {
      Value *BaseAddr = const_cast<Value*>((*I)->getBaseAddr());
      Values.insert(BaseAddr);

      // FIXME: We assume that there is one and only one array to be written
      // in a SCoP.
      int NumWrites = 0;
      if ((*I)->isWrite()) {
        ++NumWrites;
        assert(NumWrites <= 1 &&
               "We support at most one array to be written in a SCoP.");
        if (const PointerType *PT =
                dyn_cast<PointerType>(BaseAddr->getType())) {
          Type *T = PT->getArrayElementType();
          const ArrayType *ATy = dyn_cast<ArrayType>(T);
          OutputBytes = getArraySizeInBytes(ATy);
        }
      }
    }
  }

  return Values;
}

const clast_stmt *ClastStmtCodeGen::getScheduleInfo(const clast_for *F,
                                                    std::vector<int> &NumIters,
                                                    unsigned &LoopDepth,
                                                    unsigned &NonPLoopDepth) {
  clast_stmt *Stmt = (clast_stmt *)F;
  const clast_for *Result;
  bool NonParaFlag = false;
  LoopDepth = 0;
  NonPLoopDepth = 0;

  while (Stmt) {
    if (CLAST_STMT_IS_A(Stmt, stmt_for)) {
      const clast_for *T = (clast_for *)Stmt;
      if (isParallelFor(T)) {
        if (!NonParaFlag) {
          NumIters.push_back(getNumberOfIterations(T));
          Result = T;
        }
      } else
        NonParaFlag = true;

      Stmt = T->body;
      LoopDepth++;
      continue;
    }
    Stmt = Stmt->next;
  }

  assert(NumIters.size() == 4 &&
         "The loops should be tiled into 4-depth parallel loops and an "
         "innermost non-parallel one (if it exists).");
  NonPLoopDepth = LoopDepth - NumIters.size();
  assert(NonPLoopDepth <= 1 &&
         "We support only one innermost non-parallel loop currently.");
  return (const clast_stmt *)Result->body;
}

void ClastStmtCodeGen::codegenForGPGPU(const clast_for *F) {
  BasicBlock::iterator LoopBody;
  SetVector<Value *> Values;
  SetVector<Value *> IVS;
  std::vector<int> NumIterations;
  PTXGenerator::ValueToValueMapTy VMap;

  assert(!GPUTriple.empty() &&
         "Target triple should be set properly for GPGPU code generation.");
  PTXGenerator PTXGen(Builder, P, GPUTriple);

  // Get the original IVs and the ScopStmt.
  unsigned TiledLoopDepth, NonPLoopDepth;
  const clast_stmt *InnerStmt = getScheduleInfo(F, NumIterations,
                                                TiledLoopDepth, NonPLoopDepth);
  const clast_stmt *TmpStmt;
  const clast_user_stmt *U;
  const clast_for *InnerFor;
  if (CLAST_STMT_IS_A(InnerStmt, stmt_for)) {
    InnerFor = (const clast_for *)InnerStmt;
    TmpStmt = InnerFor->body;
  } else
    TmpStmt = InnerStmt;
  U = (const clast_user_stmt *)TmpStmt;
  ScopStmt *Statement = (ScopStmt *)U->statement->usr;
  for (unsigned i = 0; i < Statement->getNumIterators() - NonPLoopDepth; i++) {
    const Value *IV = Statement->getInductionVariableForDimension(i);
    IVS.insert(const_cast<Value *>(IV));
  }

  unsigned OutBytes;
  Values = getGPUValues(OutBytes);
  PTXGen.setOutputBytes(OutBytes);
  PTXGen.startGeneration(Values, IVS, VMap, &LoopBody);

  BasicBlock::iterator AfterLoop = Builder.GetInsertPoint();
  Builder.SetInsertPoint(LoopBody);

  BasicBlock *AfterBB = 0;
  if (NonPLoopDepth) {
    Value *LowerBound, *UpperBound, *IV, *Stride;
    Type *IntPtrTy = getIntPtrTy();
    LowerBound = ExpGen.codegen(InnerFor->LB, IntPtrTy);
    UpperBound = ExpGen.codegen(InnerFor->UB, IntPtrTy);
    Stride = Builder.getInt(APInt_from_MPZ(InnerFor->stride));
    IV = createLoop(LowerBound, UpperBound, Stride, Builder, P, AfterBB,
                    CmpInst::ICMP_SLE);
    const Value *OldIV_ = Statement->getInductionVariableForDimension(2);
    Value *OldIV = const_cast<Value *>(OldIV_);
    VMap.insert(std::make_pair<Value*, Value*>(OldIV, IV));
  }

  updateWithValueMap(VMap);

  BlockGenerator::generate(Builder, *Statement, ValueMap, P);
  if (AfterBB)
    Builder.SetInsertPoint(AfterBB->begin());

  // FIXME: The replacement of the host base address with the parameter of the
  // ptx subfunction should have been done by updateWithValueMap. We use the
  // following code to avoid affecting other parts of Polly. This should be
  // fixed later.
  Function *FN = Builder.GetInsertBlock()->getParent();
  for (unsigned j = 0; j < Values.size(); j++) {
    Value *baseAddr = Values[j];
    for (Function::iterator B = FN->begin(); B != FN->end(); ++B) {
      for (BasicBlock::iterator I = B->begin(); I != B->end(); ++I)
        I->replaceUsesOfWith(baseAddr, ValueMap[baseAddr]);
    }
  }
  Builder.SetInsertPoint(AfterLoop);
  PTXGen.setLaunchingParameters(NumIterations[0], NumIterations[1],
                                NumIterations[2], NumIterations[3]);
  PTXGen.finishGeneration(FN);
}
#endif

bool ClastStmtCodeGen::isInnermostLoop(const clast_for *f) {
  const clast_stmt *stmt = f->body;

  while (stmt) {
    if (!CLAST_STMT_IS_A(stmt, stmt_user))
      return false;

    stmt = stmt->next;
  }

  return true;
}

int ClastStmtCodeGen::getNumberOfIterations(const clast_for *For) {
  isl_set *LoopDomain = isl_set_copy(isl_set_from_cloog_domain(For->domain));
  int NumberOfIterations = polly::getNumberOfIterations(LoopDomain);
  if (NumberOfIterations == -1)
    return -1;
  return NumberOfIterations / isl_int_get_si(For->stride) + 1;
}

void ClastStmtCodeGen::codegenForVector(const clast_for *F) {
  DEBUG(dbgs() << "Vectorizing loop '" << F->iterator << "'\n";);
  int VectorWidth = getNumberOfIterations(F);

  Value *LB = ExpGen.codegen(F->LB, getIntPtrTy());

  APInt Stride = APInt_from_MPZ(F->stride);
  IntegerType *LoopIVType = dyn_cast<IntegerType>(LB->getType());
  Stride = Stride.zext(LoopIVType->getBitWidth());
  Value *StrideValue = ConstantInt::get(LoopIVType, Stride);

  std::vector<Value*> IVS(VectorWidth);
  IVS[0] = LB;

  for (int i = 1; i < VectorWidth; i++)
    IVS[i] = Builder.CreateAdd(IVS[i-1], StrideValue, "p_vector_iv");

  isl_set *Domain = isl_set_from_cloog_domain(F->domain);

  // Add loop iv to symbols.
  ClastVars[F->iterator] = LB;

  const clast_stmt *Stmt = F->body;

  while (Stmt) {
    codegen((const clast_user_stmt *)Stmt, &IVS, F->iterator,
            isl_set_copy(Domain));
    Stmt = Stmt->next;
  }

  // Loop is finished, so remove its iv from the live symbols.
  isl_set_free(Domain);
  ClastVars.erase(F->iterator);
}

bool ClastStmtCodeGen::isParallelFor(const clast_for *f) {
  isl_set *Domain = isl_set_from_cloog_domain(f->domain);
  assert(Domain && "Cannot access domain of loop");

  Dependences &D = P->getAnalysis<Dependences>();

  return D.isParallelDimension(isl_set_copy(Domain), isl_set_n_dim(Domain));
}

void ClastStmtCodeGen::codegen(const clast_for *f) {
  bool Vector = PollyVectorizerChoice != VECTORIZER_NONE;
  if ((Vector || OpenMP) && isParallelFor(f)) {
    if (Vector && isInnermostLoop(f) && (-1 != getNumberOfIterations(f)) &&
        (getNumberOfIterations(f) <= 16)) {
      codegenForVector(f);
      return;
    }

    if (OpenMP && !parallelCodeGeneration) {
      parallelCodeGeneration = true;
      parallelLoops.push_back(f->iterator);
      codegenForOpenMP(f);
      parallelCodeGeneration = false;
      return;
    }
  }

#ifdef GPU_CODEGEN
  if (GPGPU && isParallelFor(f)) {
    if (!parallelCodeGeneration) {
      parallelCodeGeneration = true;
      parallelLoops.push_back(f->iterator);
      codegenForGPGPU(f);
      parallelCodeGeneration = false;
      return;
    }
  }
#endif

  codegenForSequential(f);
}

Value *ClastStmtCodeGen::codegen(const clast_equation *eq) {
  Value *LHS = ExpGen.codegen(eq->LHS, getIntPtrTy());
  Value *RHS = ExpGen.codegen(eq->RHS, getIntPtrTy());
  CmpInst::Predicate P;

  if (eq->sign == 0)
    P = ICmpInst::ICMP_EQ;
  else if (eq->sign > 0)
    P = ICmpInst::ICMP_SGE;
  else
    P = ICmpInst::ICMP_SLE;

  return Builder.CreateICmp(P, LHS, RHS);
}

void ClastStmtCodeGen::codegen(const clast_guard *g) {
  Function *F = Builder.GetInsertBlock()->getParent();
  LLVMContext &Context = F->getContext();

  BasicBlock *CondBB = SplitBlock(Builder.GetInsertBlock(),
                                  Builder.GetInsertPoint(), P);
  CondBB->setName("polly.cond");
  BasicBlock *MergeBB = SplitBlock(CondBB, CondBB->begin(), P);
  MergeBB->setName("polly.merge");
  BasicBlock *ThenBB = BasicBlock::Create(Context, "polly.then", F);

  DominatorTree &DT = P->getAnalysis<DominatorTree>();
  DT.addNewBlock(ThenBB, CondBB);
  DT.changeImmediateDominator(MergeBB, CondBB);

  CondBB->getTerminator()->eraseFromParent();

  Builder.SetInsertPoint(CondBB);

  Value *Predicate = codegen(&(g->eq[0]));

  for (int i = 1; i < g->n; ++i) {
    Value *TmpPredicate = codegen(&(g->eq[i]));
    Predicate = Builder.CreateAnd(Predicate, TmpPredicate);
  }

  Builder.CreateCondBr(Predicate, ThenBB, MergeBB);
  Builder.SetInsertPoint(ThenBB);
  Builder.CreateBr(MergeBB);
  Builder.SetInsertPoint(ThenBB->begin());

  codegen(g->then);

  Builder.SetInsertPoint(MergeBB->begin());
}

void ClastStmtCodeGen::codegen(const clast_stmt *stmt) {
  if (CLAST_STMT_IS_A(stmt, stmt_root))
    assert(false && "No second root statement expected");
  else if (CLAST_STMT_IS_A(stmt, stmt_ass))
    codegen((const clast_assignment *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_user))
    codegen((const clast_user_stmt *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_block))
    codegen((const clast_block *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_for))
    codegen((const clast_for *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_guard))
    codegen((const clast_guard *)stmt);

  if (stmt->next)
    codegen(stmt->next);
}

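/// @brief Make the SCoP's parameter values available to the clast expression
///        code generator.
///
/// Each parameter SCEV is expanded to LLVM-IR and the resulting value is
/// registered in ClastVars under the name CLooG uses for this parameter
/// (names->parameters[i]).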
void ClastStmtCodeGen::addParameters(const CloogNames *names) {
  SCEVExpander Rewriter(P->getAnalysis<ScalarEvolution>(), "polly");

  int i = 0;
  for (Scop::param_iterator PI = S->param_begin(), PE = S->param_end();
       PI != PE; ++PI) {
    assert(i < names->nb_parameters && "Not enough parameter names");

    const SCEV *Param = *PI;
    Type *Ty = Param->getType();

    Instruction *insertLocation = --(Builder.GetInsertBlock()->end());
    Value *V = Rewriter.expandCodeFor(Param, Ty, insertLocation);
    ClastVars[names->parameters[i]] = V;

    ++i;
  }
}

void ClastStmtCodeGen::codegen(const clast_root *r) {
  addParameters(r->names);

  parallelCodeGeneration = false;

  const clast_stmt *stmt = (const clast_stmt *)r;
  if (stmt->next)
    codegen(stmt->next);
}

ClastStmtCodeGen::ClastStmtCodeGen(Scop *scop, IRBuilder<> &B, Pass *P)
    : S(scop), P(P), Builder(B), ExpGen(Builder, ClastVars) {}

namespace {
class CodeGeneration : public ScopPass {
  std::vector<std::string> ParallelLoops;

public:
  static char ID;

  CodeGeneration() : ScopPass(ID) {}

  bool runOnScop(Scop &S) {
    ParallelLoops.clear();

    assert(S.getRegion().isSimple() && "Only simple regions are supported");

    BasicBlock *StartBlock = executeScopConditionally(S, this);

    IRBuilder<> Builder(StartBlock->begin());

    ClastStmtCodeGen CodeGen(&S, Builder, this);
    CloogInfo &C = getAnalysis<CloogInfo>();
    CodeGen.codegen(C.getClast());

    ParallelLoops.insert(ParallelLoops.begin(),
                         CodeGen.getParallelLoops().begin(),
                         CodeGen.getParallelLoops().end());
    return true;
  }

  virtual void printScop(raw_ostream &OS) const {
    for (std::vector<std::string>::const_iterator PI = ParallelLoops.begin(),
         PE = ParallelLoops.end(); PI != PE; ++PI)
      OS << "Parallel loop with iterator '" << *PI << "' generated\n";
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<CloogInfo>();
    AU.addRequired<Dependences>();
    AU.addRequired<DominatorTree>();
    AU.addRequired<RegionInfo>();
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<ScopDetection>();
    AU.addRequired<ScopInfo>();
    AU.addRequired<DataLayout>();

    AU.addPreserved<CloogInfo>();
    AU.addPreserved<Dependences>();

    // FIXME: We do not create LoopInfo for the newly generated loops.
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
    AU.addPreserved<ScopDetection>();
    AU.addPreserved<ScalarEvolution>();

    // FIXME: We do not yet add regions for the newly generated code to the
    // region tree.
    AU.addPreserved<RegionInfo>();
    AU.addPreserved<TempScopInfo>();
    AU.addPreserved<ScopInfo>();
    AU.addPreservedID(IndependentBlocksID);
  }
};
}

char CodeGeneration::ID = 1;

INITIALIZE_PASS_BEGIN(CodeGeneration, "polly-codegen",
                      "Polly - Create LLVM-IR from SCoPs", false, false)
INITIALIZE_PASS_DEPENDENCY(CloogInfo)
INITIALIZE_PASS_DEPENDENCY(Dependences)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(RegionInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(ScopDetection)
INITIALIZE_PASS_DEPENDENCY(DataLayout)
INITIALIZE_PASS_END(CodeGeneration, "polly-codegen",
                    "Polly - Create LLVM-IR from SCoPs", false, false)

Pass *polly::createCodeGenerationPass() {
  return new CodeGeneration();
}

#endif // CLOOG_FOUND