//===------ CodeGeneration.cpp - Code generate the Scops. -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The CodeGeneration pass takes a Scop created by ScopInfo and translates it
// back to LLVM-IR using Cloog.
//
// The Scop describes the high level memory behaviour of a control flow region.
// Transformation passes can update the schedule (execution order) of
// statements in the Scop. Cloog is used to generate an abstract syntax tree
// (clast) that reflects the updated execution order. This clast is used to
// create new LLVM-IR that is computationally equivalent to the original
// control flow region, but executes its code in the new execution order
// defined by the changed scattering.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/Cloog.h"
#ifdef CLOOG_FOUND

#define DEBUG_TYPE "polly-codegen"
#include "polly/Dependences.h"
#include "polly/LinkAllPasses.h"
#include "polly/ScopInfo.h"
#include "polly/TempScopInfo.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/LoopGenerators.h"
#include "polly/CodeGen/PTXGenerator.h"
#include "polly/CodeGen/Utils.h"
#include "polly/Support/GICHelper.h"

#include "llvm/IR/Module.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define CLOOG_INT_GMP 1
#include "cloog/cloog.h"
#include "cloog/isl/cloog.h"

#include "isl/aff.h"

#include <vector>
#include <utility>

using namespace polly;
using namespace llvm;

struct isl_set;

namespace polly {
static cl::opt<bool>
OpenMP("enable-polly-openmp", cl::desc("Generate OpenMP parallel code"),
       cl::Hidden, cl::value_desc("OpenMP code generation enabled if true"),
       cl::init(false), cl::ZeroOrMore);

#ifdef GPU_CODEGEN
static cl::opt<bool>
GPGPU("enable-polly-gpgpu", cl::desc("Generate GPU parallel code"), cl::Hidden,
      cl::value_desc("GPGPU code generation enabled if true"), cl::init(false),
      cl::ZeroOrMore);

static cl::opt<std::string> GPUTriple(
    "polly-gpgpu-triple", cl::desc("Target triple for GPU code generation"),
    cl::Hidden, cl::init(""));
#endif /* GPU_CODEGEN */

typedef DenseMap<const char *, Value *> CharMapT;

/// Class to generate LLVM-IR that calculates the value of a clast_expr.
class ClastExpCodeGen {
  IRBuilder<> &Builder;
  const CharMapT &IVS;

  Value *codegen(const clast_name *e, Type *Ty);
  Value *codegen(const clast_term *e, Type *Ty);
  Value *codegen(const clast_binary *e, Type *Ty);
  Value *codegen(const clast_reduction *r, Type *Ty);

public:
  // A generator for clast expressions.
  //
  // @param B The IRBuilder that defines where the code to calculate the
  //          clast expressions should be inserted.
  // @param IVMap A map that translates strings describing the induction
  //              variables to the Values that represent these variables
  //              on the LLVM side.
  ClastExpCodeGen(IRBuilder<> &B, CharMapT &IVMap);

  // Generates code to calculate a given clast expression.
  //
  // @param e The expression to calculate.
  // @return The Value that holds the result.
  Value *codegen(const clast_expr *e, Type *Ty);
};

Value *ClastExpCodeGen::codegen(const clast_name *e, Type *Ty) {
  CharMapT::const_iterator I = IVS.find(e->name);

  assert(I != IVS.end() && "Clast name not found");

  return Builder.CreateSExtOrBitCast(I->second, Ty);
}

Value *ClastExpCodeGen::codegen(const clast_term *e, Type *Ty) {
  APInt a = APInt_from_MPZ(e->val);

  Value *ConstOne = ConstantInt::get(Builder.getContext(), a);
  ConstOne = Builder.CreateSExtOrBitCast(ConstOne, Ty);

  if (!e->var)
    return ConstOne;

  Value *var = codegen(e->var, Ty);
  return Builder.CreateMul(ConstOne, var);
}

Value *ClastExpCodeGen::codegen(const clast_binary *e, Type *Ty) {
  Value *LHS = codegen(e->LHS, Ty);

  APInt RHS_AP = APInt_from_MPZ(e->RHS);

  Value *RHS = ConstantInt::get(Builder.getContext(), RHS_AP);
  RHS = Builder.CreateSExtOrBitCast(RHS, Ty);

  switch (e->type) {
  case clast_bin_mod:
    return Builder.CreateSRem(LHS, RHS);
  case clast_bin_fdiv: {
    // floord(n,d): ((n < 0) ? (n - d + 1) : n) / d
    Value *One = ConstantInt::get(Ty, 1);
    Value *Zero = ConstantInt::get(Ty, 0);
    Value *Sum1 = Builder.CreateSub(LHS, RHS);
    Value *Sum2 = Builder.CreateAdd(Sum1, One);
    Value *isNegative = Builder.CreateICmpSLT(LHS, Zero);
    Value *Dividend = Builder.CreateSelect(isNegative, Sum2, LHS);
    return Builder.CreateSDiv(Dividend, RHS);
  }
  case clast_bin_cdiv: {
    // ceild(n,d): ((n < 0) ? n : (n + d - 1)) / d
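    //
    // For example, ceild(5, 4) = (5 + 4 - 1) / 4 = 2, while for a negative
    // numerator the plain truncating division already rounds towards zero and
    // hence up: ceild(-5, 4) = -5 / 4 = -1.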
    Value *One = ConstantInt::get(Ty, 1);
    Value *Zero = ConstantInt::get(Ty, 0);
    Value *Sum1 = Builder.CreateAdd(LHS, RHS);
    Value *Sum2 = Builder.CreateSub(Sum1, One);
    Value *isNegative = Builder.CreateICmpSLT(LHS, Zero);
    Value *Dividend = Builder.CreateSelect(isNegative, LHS, Sum2);
    return Builder.CreateSDiv(Dividend, RHS);
  }
  case clast_bin_div:
    return Builder.CreateSDiv(LHS, RHS);
  }

  llvm_unreachable("Unknown clast binary expression type");
}

Value *ClastExpCodeGen::codegen(const clast_reduction *r, Type *Ty) {
  assert((r->type == clast_red_min || r->type == clast_red_max ||
          r->type == clast_red_sum) && "Clast reduction type not supported");
  Value *old = codegen(r->elts[0], Ty);

  for (int i = 1; i < r->n; ++i) {
    Value *exprValue = codegen(r->elts[i], Ty);

    switch (r->type) {
    case clast_red_min: {
      Value *cmp = Builder.CreateICmpSLT(old, exprValue);
      old = Builder.CreateSelect(cmp, old, exprValue);
      break;
    }
    case clast_red_max: {
      Value *cmp = Builder.CreateICmpSGT(old, exprValue);
      old = Builder.CreateSelect(cmp, old, exprValue);
      break;
    }
    case clast_red_sum:
      old = Builder.CreateAdd(old, exprValue);
      break;
    }
  }

  return old;
}

ClastExpCodeGen::ClastExpCodeGen(IRBuilder<> &B, CharMapT &IVMap)
    : Builder(B), IVS(IVMap) {}

Value *ClastExpCodeGen::codegen(const clast_expr *e, Type *Ty) {
  switch (e->type) {
  case clast_expr_name:
    return codegen((const clast_name *)e, Ty);
  case clast_expr_term:
    return codegen((const clast_term *)e, Ty);
  case clast_expr_bin:
    return codegen((const clast_binary *)e, Ty);
  case clast_expr_red:
    return codegen((const clast_reduction *)e, Ty);
  }

  llvm_unreachable("Unknown clast expression!");
}

class ClastStmtCodeGen {
public:
  const std::vector<std::string> &getParallelLoops();

private:
  // The Scop we code generate.
  Scop *S;
  Pass *P;

  // The Builder specifies the current location to code generate at.
  IRBuilder<> &Builder;

  // Map the Values from the old code to their counterparts in the new code.
  ValueMapT ValueMap;

  // Map the loops from the old code to expressions in the induction variables
  // of the new code. For example, when the code generator produces this AST:
  //
  //   for (int c1 = 0; c1 <= 1023; c1 += 1)
  //     for (int c2 = 0; c2 <= 1023; c2 += 1)
  //       Stmt(c2 + 3, c1);
  //
  // LoopToScev is a map associating:
  //   "outer loop in the old loop nest" -> SCEV("c2 + 3"),
  //   "inner loop in the old loop nest" -> SCEV("c1").
  LoopToScevMapT LoopToScev;

  // ClastVars maps from the textual representation of a clast variable to its
  // current *Value. clast variables are scheduling variables, original
  // induction variables or parameters. They are used either in loop bounds or
  // to define the statement instance that is executed.
  //
  //   for (s = 0; s < n + 3; ++s)
  //     for (t = s; t < m; ++t)
  //       Stmt(i = s + 3 * m, j = t);
  //
  //   {s,t,i,j,n,m} is the set of clast variables in this clast.
  CharMapT ClastVars;

  // Code generator for clast expressions.
  ClastExpCodeGen ExpGen;

  // Do we currently generate parallel code?
  bool parallelCodeGeneration;
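
  // Names of the clast loop iterators for which parallel (OpenMP or GPGPU)
  // code has been generated.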
  std::vector<std::string> parallelLoops;

  void codegen(const clast_assignment *a);

  void codegen(const clast_assignment *a, ScopStmt *Statement,
               unsigned Dimension, int vectorDim,
               std::vector<ValueMapT> *VectorVMap = 0,
               std::vector<LoopToScevMapT> *VLTS = 0);

  void codegenSubstitutions(const clast_stmt *Assignment, ScopStmt *Statement,
                            int vectorDim = 0,
                            std::vector<ValueMapT> *VectorVMap = 0,
                            std::vector<LoopToScevMapT> *VLTS = 0);

  void codegen(const clast_user_stmt *u, std::vector<Value *> *IVS = NULL,
               const char *iterator = NULL, isl_set *scatteringDomain = 0);

  void codegen(const clast_block *b);

  /// @brief Create a classical sequential loop.
  void codegenForSequential(const clast_for *f);

  /// @brief Create OpenMP structure values.
  ///
  /// Create a list of values that have to be stored into the OpenMP
  /// subfunction structure.
  SetVector<Value *> getOMPValues(const clast_stmt *Body);

  /// @brief Update ClastVars and ValueMap according to a value map.
  ///
  /// @param VMap A map from old to new values.
  void updateWithValueMap(OMPGenerator::ValueToValueMapTy &VMap);

  /// @brief Create an OpenMP parallel for loop.
  ///
  /// This loop reflects a loop as if it had been created by an OpenMP
  /// statement.
  void codegenForOpenMP(const clast_for *f);

#ifdef GPU_CODEGEN
  /// @brief Create GPGPU device memory access values.
  ///
  /// Create a list of values that will be set to be parameters of the GPGPU
  /// subfunction. These parameters represent device memory base addresses
  /// and the size in bytes.
  SetVector<Value *> getGPUValues(unsigned &OutputBytes);

  /// @brief Create a GPU parallel for loop.
  ///
  /// This loop reflects a loop as if it had been created by a GPU
  /// statement.
  void codegenForGPGPU(const clast_for *F);

  /// @brief Get innermost for loop.
  const clast_stmt *
  getScheduleInfo(const clast_for *F, std::vector<int> &NumIters,
                  unsigned &LoopDepth, unsigned &NonPLoopDepth);
#endif /* GPU_CODEGEN */

  /// @brief Check if a loop is parallel.
  ///
  /// Detect if a clast_for loop can be executed in parallel.
  ///
  /// @param For The clast for loop to check.
  ///
  /// @return bool Returns true if the incoming clast_for statement can
  ///              execute in parallel.
  bool isParallelFor(const clast_for *For);

  bool isInnermostLoop(const clast_for *f);

  /// @brief Get the number of loop iterations for this loop.
  /// @param f The clast for loop to check.
  int getNumberOfIterations(const clast_for *f);

  /// @brief Create vector instructions for this loop.
  void codegenForVector(const clast_for *f);
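
  /// @brief Create code for a clast for loop, selecting between the vector,
  ///        OpenMP, GPGPU and sequential code generation paths.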
  void codegen(const clast_for *f);

  Value *codegen(const clast_equation *eq);

  void codegen(const clast_guard *g);

  void codegen(const clast_stmt *stmt);

  void addParameters(const CloogNames *names);

  IntegerType *getIntPtrTy();

public:
  void codegen(const clast_root *r);

  ClastStmtCodeGen(Scop *scop, IRBuilder<> &B, Pass *P);
};
}

IntegerType *ClastStmtCodeGen::getIntPtrTy() {
  return P->getAnalysis<DataLayout>().getIntPtrType(Builder.getContext());
}

const std::vector<std::string> &ClastStmtCodeGen::getParallelLoops() {
  return parallelLoops;
}

void ClastStmtCodeGen::codegen(const clast_assignment *a) {
  Value *V = ExpGen.codegen(a->RHS, getIntPtrTy());
  ClastVars[a->LHS] = V;
}

void ClastStmtCodeGen::codegen(
    const clast_assignment *A, ScopStmt *Stmt, unsigned Dim, int VectorDim,
    std::vector<ValueMapT> *VectorVMap, std::vector<LoopToScevMapT> *VLTS) {
  Value *RHS;

  assert(!A->LHS && "Statement assignments do not have left hand side");

  RHS = ExpGen.codegen(A->RHS, Builder.getInt64Ty());

  const llvm::SCEV *URHS = S->getSE()->getUnknown(RHS);
  if (VLTS)
    (*VLTS)[VectorDim][Stmt->getLoopForDimension(Dim)] = URHS;
  LoopToScev[Stmt->getLoopForDimension(Dim)] = URHS;

  const PHINode *PN = Stmt->getInductionVariableForDimension(Dim);
  if (PN) {
    RHS = Builder.CreateTruncOrBitCast(RHS, PN->getType());

    if (VectorVMap)
      (*VectorVMap)[VectorDim][PN] = RHS;

    ValueMap[PN] = RHS;
  }
}

void ClastStmtCodeGen::codegenSubstitutions(
    const clast_stmt *Assignment, ScopStmt *Statement, int vectorDim,
    std::vector<ValueMapT> *VectorVMap, std::vector<LoopToScevMapT> *VLTS) {
  int Dimension = 0;

  while (Assignment) {
    assert(CLAST_STMT_IS_A(Assignment, stmt_ass) &&
           "Substitutions are expected to be assignments");
    codegen((const clast_assignment *)Assignment, Statement, Dimension,
            vectorDim, VectorVMap, VLTS);
    Assignment = Assignment->next;
    Dimension++;
  }
}

// Takes the Cloog-specific domain and translates it into a map Statement ->
// PartialSchedule, where the PartialSchedule contains all the dimensions that
// have been code generated up to this point.
static __isl_give isl_map *
extractPartialSchedule(ScopStmt *Statement, isl_set *Domain) {
  isl_map *Schedule = Statement->getScattering();
  int ScheduledDimensions = isl_set_dim(Domain, isl_dim_set);
  int UnscheduledDimensions =
      isl_map_dim(Schedule, isl_dim_out) - ScheduledDimensions;

  return isl_map_project_out(Schedule, isl_dim_out, ScheduledDimensions,
                             UnscheduledDimensions);
}

void ClastStmtCodeGen::codegen(const clast_user_stmt *u,
                               std::vector<Value *> *IVS, const char *iterator,
                               isl_set *Domain) {
  ScopStmt *Statement = (ScopStmt *)u->statement->usr;

  if (u->substitutions)
    codegenSubstitutions(u->substitutions, Statement);

  int VectorDimensions = IVS ? IVS->size() : 1;
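
  // If no per-lane induction variable values are provided, emit a single
  // scalar statement instance; otherwise emit one instance per vector lane.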
  if (VectorDimensions == 1) {
    BlockGenerator::generate(Builder, *Statement, ValueMap, LoopToScev, P);
    return;
  }

  VectorValueMapT VectorMap(VectorDimensions);
  std::vector<LoopToScevMapT> VLTS(VectorDimensions);

  if (IVS) {
    assert(u->substitutions && "Substitutions expected!");
    int i = 0;
    for (std::vector<Value *>::iterator II = IVS->begin(), IE = IVS->end();
         II != IE; ++II) {
      ClastVars[iterator] = *II;
      codegenSubstitutions(u->substitutions, Statement, i, &VectorMap, &VLTS);
      i++;
    }
  }

  isl_map *Schedule = extractPartialSchedule(Statement, Domain);
  VectorBlockGenerator::generate(Builder, *Statement, VectorMap, VLTS, Schedule,
                                 P);
  isl_map_free(Schedule);
}

void ClastStmtCodeGen::codegen(const clast_block *b) {
  if (b->body)
    codegen(b->body);
}

void ClastStmtCodeGen::codegenForSequential(const clast_for *f) {
  Value *LowerBound, *UpperBound, *IV, *Stride;
  BasicBlock *AfterBB;
  Type *IntPtrTy = getIntPtrTy();

  LowerBound = ExpGen.codegen(f->LB, IntPtrTy);
  UpperBound = ExpGen.codegen(f->UB, IntPtrTy);
  Stride = Builder.getInt(APInt_from_MPZ(f->stride));

  IV = createLoop(LowerBound, UpperBound, Stride, Builder, P, AfterBB,
                  CmpInst::ICMP_SLE);

  // Add loop iv to symbols.
  ClastVars[f->iterator] = IV;

  if (f->body)
    codegen(f->body);

  // Loop is finished, so remove its iv from the live symbols.
  ClastVars.erase(f->iterator);
  Builder.SetInsertPoint(AfterBB->begin());
}

// Helper class to determine all scalar parameters used in the basic blocks of
// a clast. Scalar parameters are scalar variables defined outside of the SCoP.
class ParameterVisitor : public ClastVisitor {
  std::set<Value *> Values;

public:
  ParameterVisitor() : ClastVisitor(), Values() {}

  void visitUser(const clast_user_stmt *Stmt) {
    const ScopStmt *S = static_cast<const ScopStmt *>(Stmt->statement->usr);
    const BasicBlock *BB = S->getBasicBlock();

    // Check all the operands of instructions in the basic block.
    for (BasicBlock::const_iterator BI = BB->begin(), BE = BB->end(); BI != BE;
         ++BI) {
      const Instruction &Inst = *BI;
      for (Instruction::const_op_iterator II = Inst.op_begin(),
                                          IE = Inst.op_end();
           II != IE; ++II) {
        Value *SrcVal = *II;

        if (Instruction *OpInst = dyn_cast<Instruction>(SrcVal))
          if (S->getParent()->getRegion().contains(OpInst))
            continue;

        if (isa<Instruction>(SrcVal) || isa<Argument>(SrcVal))
          Values.insert(SrcVal);
      }
    }
  }

  // Iterator to iterate over the values found.
  typedef std::set<Value *>::const_iterator const_iterator;
  inline const_iterator begin() const { return Values.begin(); }
  inline const_iterator end() const { return Values.end(); }
};

SetVector<Value *> ClastStmtCodeGen::getOMPValues(const clast_stmt *Body) {
  SetVector<Value *> Values;

  // The clast variables.
  for (CharMapT::iterator I = ClastVars.begin(), E = ClastVars.end(); I != E;
       I++)
    Values.insert(I->second);

  // Find the temporaries that are referenced in the clast statements'
  // basic blocks but are not defined by these blocks (e.g., references
  // to function arguments or temporaries defined before the start of
  // the SCoP).
  ParameterVisitor Params;
  Params.visit(Body);

  for (ParameterVisitor::const_iterator PI = Params.begin(), PE = Params.end();
       PI != PE; ++PI) {
    Value *V = *PI;
    Values.insert(V);
    DEBUG(dbgs() << "Adding temporary for OMP copy-in: " << *V << "\n");
  }

  return Values;
}
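
// Redirect the values tracked in ClastVars to their counterparts in VMap and
// record the remaining mappings in ValueMap, so that code generated inside the
// subfunction refers to the copied-in values instead of the original ones.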
void ClastStmtCodeGen::updateWithValueMap(
    OMPGenerator::ValueToValueMapTy &VMap) {
  std::set<Value *> Inserted;

  for (CharMapT::iterator I = ClastVars.begin(), E = ClastVars.end(); I != E;
       I++) {
    ClastVars[I->first] = VMap[I->second];
    Inserted.insert(I->second);
  }

  for (OMPGenerator::ValueToValueMapTy::iterator I = VMap.begin(),
                                                 E = VMap.end();
       I != E; ++I) {
    if (Inserted.count(I->first))
      continue;

    ValueMap[I->first] = I->second;
  }
}

static void clearDomtree(Function *F, DominatorTree &DT) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (std::vector<BasicBlock *>::iterator I = Nodes.begin(), E = Nodes.end();
       I != E; ++I)
    DT.eraseNode(*I);
}

void ClastStmtCodeGen::codegenForOpenMP(const clast_for *For) {
  Value *Stride, *LB, *UB, *IV;
  BasicBlock::iterator LoopBody;
  IntegerType *IntPtrTy = getIntPtrTy();
  SetVector<Value *> Values;
  OMPGenerator::ValueToValueMapTy VMap;
  OMPGenerator OMPGen(Builder, P);

  Stride = Builder.getInt(APInt_from_MPZ(For->stride));
  Stride = Builder.CreateSExtOrBitCast(Stride, IntPtrTy);
  LB = ExpGen.codegen(For->LB, IntPtrTy);
  UB = ExpGen.codegen(For->UB, IntPtrTy);

  Values = getOMPValues(For->body);

  IV = OMPGen.createParallelLoop(LB, UB, Stride, Values, VMap, &LoopBody);
  BasicBlock::iterator AfterLoop = Builder.GetInsertPoint();
  Builder.SetInsertPoint(LoopBody);

  // Save the current values.
  const ValueMapT ValueMapCopy = ValueMap;
  const CharMapT ClastVarsCopy = ClastVars;

  updateWithValueMap(VMap);
  ClastVars[For->iterator] = IV;

  if (For->body)
    codegen(For->body);

  // Restore the original values.
  ValueMap = ValueMapCopy;
  ClastVars = ClastVarsCopy;

  clearDomtree((*LoopBody).getParent()->getParent(),
               P->getAnalysis<DominatorTree>());

  Builder.SetInsertPoint(AfterLoop);
}

#ifdef GPU_CODEGEN
static unsigned getArraySizeInBytes(const ArrayType *AT) {
  unsigned Bytes = AT->getNumElements();
  if (const ArrayType *T = dyn_cast<ArrayType>(AT->getElementType()))
    Bytes *= getArraySizeInBytes(T);
  else
    Bytes *= AT->getElementType()->getPrimitiveSizeInBits() / 8;

  return Bytes;
}

SetVector<Value *> ClastStmtCodeGen::getGPUValues(unsigned &OutputBytes) {
  SetVector<Value *> Values;
  OutputBytes = 0;

  // Record the memory reference base addresses.
  for (Scop::iterator SI = S->begin(), SE = S->end(); SI != SE; ++SI) {
    ScopStmt *Stmt = *SI;
    for (SmallVector<MemoryAccess *, 8>::iterator I = Stmt->memacc_begin(),
                                                  E = Stmt->memacc_end();
         I != E; ++I) {
      Value *BaseAddr = const_cast<Value *>((*I)->getBaseAddr());
      Values.insert(BaseAddr);

      // FIXME: we assume that there is one and only one array to be written
      // in a SCoP.
      int NumWrites = 0;
      if ((*I)->isWrite()) {
        ++NumWrites;
        assert(NumWrites <= 1 &&
               "We support at most one array to be written in a SCoP.");
        if (const PointerType *PT =
                dyn_cast<PointerType>(BaseAddr->getType())) {
          Type *T = PT->getArrayElementType();
          const ArrayType *ATy = dyn_cast<ArrayType>(T);
          OutputBytes = getArraySizeInBytes(ATy);
        }
      }
    }
  }

  return Values;
}

const clast_stmt *ClastStmtCodeGen::getScheduleInfo(
    const clast_for *F, std::vector<int> &NumIters, unsigned &LoopDepth,
    unsigned &NonPLoopDepth) {
  clast_stmt *Stmt = (clast_stmt *)F;
  const clast_for *Result = 0;
  bool NonParaFlag = false;
  LoopDepth = 0;
  NonPLoopDepth = 0;

  while (Stmt) {
    if (CLAST_STMT_IS_A(Stmt, stmt_for)) {
      const clast_for *T = (clast_for *)Stmt;
      if (isParallelFor(T)) {
        if (!NonParaFlag) {
          NumIters.push_back(getNumberOfIterations(T));
          Result = T;
        }
      } else
        NonParaFlag = true;

      Stmt = T->body;
      LoopDepth++;
      continue;
    }
    Stmt = Stmt->next;
  }

  assert(NumIters.size() == 4 &&
         "The loops should be tiled into 4-depth parallel loops and an "
         "innermost non-parallel one (if it exists).");
  NonPLoopDepth = LoopDepth - NumIters.size();
  assert(NonPLoopDepth <= 1 &&
         "We support only one innermost non-parallel loop currently.");
  return (const clast_stmt *)Result->body;
}

void ClastStmtCodeGen::codegenForGPGPU(const clast_for *F) {
  BasicBlock::iterator LoopBody;
  SetVector<Value *> Values;
  SetVector<Value *> IVS;
  std::vector<int> NumIterations;
  PTXGenerator::ValueToValueMapTy VMap;

  assert(!GPUTriple.empty() &&
         "Target triple should be set properly for GPGPU code generation.");
  PTXGenerator PTXGen(Builder, P, GPUTriple);

  // Get original IVS and ScopStmt.
  unsigned TiledLoopDepth, NonPLoopDepth;
  const clast_stmt *InnerStmt =
      getScheduleInfo(F, NumIterations, TiledLoopDepth, NonPLoopDepth);
  const clast_stmt *TmpStmt;
  const clast_user_stmt *U;
  const clast_for *InnerFor;
  if (CLAST_STMT_IS_A(InnerStmt, stmt_for)) {
    InnerFor = (const clast_for *)InnerStmt;
    TmpStmt = InnerFor->body;
  } else
    TmpStmt = InnerStmt;
  U = (const clast_user_stmt *)TmpStmt;
  ScopStmt *Statement = (ScopStmt *)U->statement->usr;
  for (unsigned i = 0; i < Statement->getNumIterators() - NonPLoopDepth; i++) {
    const Value *IV = Statement->getInductionVariableForDimension(i);
    IVS.insert(const_cast<Value *>(IV));
  }

  unsigned OutBytes;
  Values = getGPUValues(OutBytes);
  PTXGen.setOutputBytes(OutBytes);
  PTXGen.startGeneration(Values, IVS, VMap, &LoopBody);

  BasicBlock::iterator AfterLoop = Builder.GetInsertPoint();
  Builder.SetInsertPoint(LoopBody);

  BasicBlock *AfterBB = 0;
  if (NonPLoopDepth) {
    Value *LowerBound, *UpperBound, *IV, *Stride;
    Type *IntPtrTy = getIntPtrTy();
    LowerBound = ExpGen.codegen(InnerFor->LB, IntPtrTy);
    UpperBound = ExpGen.codegen(InnerFor->UB, IntPtrTy);
    Stride = Builder.getInt(APInt_from_MPZ(InnerFor->stride));
    IV = createLoop(LowerBound, UpperBound, Stride, Builder, P, AfterBB,
                    CmpInst::ICMP_SLE);
    const Value *OldIV_ = Statement->getInductionVariableForDimension(2);
    Value *OldIV = const_cast<Value *>(OldIV_);
    VMap.insert(std::make_pair<Value *, Value *>(OldIV, IV));
  }

  updateWithValueMap(VMap);

  BlockGenerator::generate(Builder, *Statement, ValueMap, LoopToScev, P);
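
  // If an innermost sequential loop was created above, continue code
  // generation behind it.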
  if (AfterBB)
    Builder.SetInsertPoint(AfterBB->begin());

  // FIXME: The replacement of the host base address with the parameter of the
  // PTX subfunction should have been done by updateWithValueMap. We use the
  // following code to avoid affecting other parts of Polly. This should be
  // fixed later.
  Function *FN = Builder.GetInsertBlock()->getParent();
  for (unsigned j = 0; j < Values.size(); j++) {
    Value *baseAddr = Values[j];
    for (Function::iterator B = FN->begin(); B != FN->end(); ++B) {
      for (BasicBlock::iterator I = B->begin(); I != B->end(); ++I)
        I->replaceUsesOfWith(baseAddr, ValueMap[baseAddr]);
    }
  }
  Builder.SetInsertPoint(AfterLoop);
  PTXGen.setLaunchingParameters(NumIterations[0], NumIterations[1],
                                NumIterations[2], NumIterations[3]);
  PTXGen.finishGeneration(FN);
}
#endif

bool ClastStmtCodeGen::isInnermostLoop(const clast_for *f) {
  const clast_stmt *stmt = f->body;

  while (stmt) {
    if (!CLAST_STMT_IS_A(stmt, stmt_user))
      return false;

    stmt = stmt->next;
  }

  return true;
}

int ClastStmtCodeGen::getNumberOfIterations(const clast_for *For) {
  isl_set *LoopDomain = isl_set_copy(isl_set_from_cloog_domain(For->domain));
  int NumberOfIterations = polly::getNumberOfIterations(LoopDomain);
  if (NumberOfIterations == -1)
    return -1;
  return NumberOfIterations / isl_int_get_si(For->stride) + 1;
}

void ClastStmtCodeGen::codegenForVector(const clast_for *F) {
  DEBUG(dbgs() << "Vectorizing loop '" << F->iterator << "'\n";);
  int VectorWidth = getNumberOfIterations(F);

  Value *LB = ExpGen.codegen(F->LB, getIntPtrTy());

  APInt Stride = APInt_from_MPZ(F->stride);
  IntegerType *LoopIVType = dyn_cast<IntegerType>(LB->getType());
  Stride = Stride.zext(LoopIVType->getBitWidth());
  Value *StrideValue = ConstantInt::get(LoopIVType, Stride);

  std::vector<Value *> IVS(VectorWidth);
  IVS[0] = LB;

  for (int i = 1; i < VectorWidth; i++)
    IVS[i] = Builder.CreateAdd(IVS[i - 1], StrideValue, "p_vector_iv");

  isl_set *Domain = isl_set_from_cloog_domain(F->domain);

  // Add loop iv to symbols.
  ClastVars[F->iterator] = LB;

  const clast_stmt *Stmt = F->body;

  while (Stmt) {
    codegen((const clast_user_stmt *)Stmt, &IVS, F->iterator,
            isl_set_copy(Domain));
    Stmt = Stmt->next;
  }

  // Loop is finished, so remove its iv from the live symbols.
  isl_set_free(Domain);
  ClastVars.erase(F->iterator);
}

bool ClastStmtCodeGen::isParallelFor(const clast_for *f) {
  isl_set *Domain = isl_set_from_cloog_domain(f->domain);
  assert(Domain && "Cannot access domain of loop");

  Dependences &D = P->getAnalysis<Dependences>();

  return D.isParallelDimension(isl_set_copy(Domain), isl_set_n_dim(Domain));
}
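
// Code generate a clast for loop. A parallel loop may be vectorized (innermost
// loops with a known trip count of at most 16) or emitted as an OpenMP or
// GPGPU subfunction (outermost parallel loops, when the corresponding option
// is enabled); any remaining loop becomes an ordinary sequential loop.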
void ClastStmtCodeGen::codegen(const clast_for *f) {
  bool Vector = PollyVectorizerChoice != VECTORIZER_NONE;
  if ((Vector || OpenMP) && isParallelFor(f)) {
    if (Vector && isInnermostLoop(f) && (-1 != getNumberOfIterations(f)) &&
        (getNumberOfIterations(f) <= 16)) {
      codegenForVector(f);
      return;
    }

    if (OpenMP && !parallelCodeGeneration) {
      parallelCodeGeneration = true;
      parallelLoops.push_back(f->iterator);
      codegenForOpenMP(f);
      parallelCodeGeneration = false;
      return;
    }
  }

#ifdef GPU_CODEGEN
  if (GPGPU && isParallelFor(f)) {
    if (!parallelCodeGeneration) {
      parallelCodeGeneration = true;
      parallelLoops.push_back(f->iterator);
      codegenForGPGPU(f);
      parallelCodeGeneration = false;
      return;
    }
  }
#endif

  codegenForSequential(f);
}

Value *ClastStmtCodeGen::codegen(const clast_equation *eq) {
  Value *LHS = ExpGen.codegen(eq->LHS, getIntPtrTy());
  Value *RHS = ExpGen.codegen(eq->RHS, getIntPtrTy());
  CmpInst::Predicate P;

  if (eq->sign == 0)
    P = ICmpInst::ICMP_EQ;
  else if (eq->sign > 0)
    P = ICmpInst::ICMP_SGE;
  else
    P = ICmpInst::ICMP_SLE;

  return Builder.CreateICmp(P, LHS, RHS);
}

void ClastStmtCodeGen::codegen(const clast_guard *g) {
  Function *F = Builder.GetInsertBlock()->getParent();
  LLVMContext &Context = F->getContext();

  BasicBlock *CondBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), P);
  CondBB->setName("polly.cond");
  BasicBlock *MergeBB = SplitBlock(CondBB, CondBB->begin(), P);
  MergeBB->setName("polly.merge");
  BasicBlock *ThenBB = BasicBlock::Create(Context, "polly.then", F);

  DominatorTree &DT = P->getAnalysis<DominatorTree>();
  DT.addNewBlock(ThenBB, CondBB);
  DT.changeImmediateDominator(MergeBB, CondBB);

  CondBB->getTerminator()->eraseFromParent();

  Builder.SetInsertPoint(CondBB);

  Value *Predicate = codegen(&(g->eq[0]));

  for (int i = 1; i < g->n; ++i) {
    Value *TmpPredicate = codegen(&(g->eq[i]));
    Predicate = Builder.CreateAnd(Predicate, TmpPredicate);
  }

  Builder.CreateCondBr(Predicate, ThenBB, MergeBB);
  Builder.SetInsertPoint(ThenBB);
  Builder.CreateBr(MergeBB);
  Builder.SetInsertPoint(ThenBB->begin());

  codegen(g->then);

  Builder.SetInsertPoint(MergeBB->begin());
}

void ClastStmtCodeGen::codegen(const clast_stmt *stmt) {
  if (CLAST_STMT_IS_A(stmt, stmt_root))
    assert(false && "No second root statement expected");
  else if (CLAST_STMT_IS_A(stmt, stmt_ass))
    codegen((const clast_assignment *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_user))
    codegen((const clast_user_stmt *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_block))
    codegen((const clast_block *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_for))
    codegen((const clast_for *)stmt);
  else if (CLAST_STMT_IS_A(stmt, stmt_guard))
    codegen((const clast_guard *)stmt);

  if (stmt->next)
    codegen(stmt->next);
}
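
// Create an LLVM-IR value for each parameter of the Scop and make it available
// to the clast expression generator under the name CLooG uses for it.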
void ClastStmtCodeGen::addParameters(const CloogNames *names) {
  SCEVExpander Rewriter(P->getAnalysis<ScalarEvolution>(), "polly");

  int i = 0;
  for (Scop::param_iterator PI = S->param_begin(), PE = S->param_end();
       PI != PE; ++PI) {
    assert(i < names->nb_parameters && "Not enough parameter names");

    const SCEV *Param = *PI;
    Type *Ty = Param->getType();

    Instruction *insertLocation = --(Builder.GetInsertBlock()->end());
    Value *V = Rewriter.expandCodeFor(Param, Ty, insertLocation);
    ClastVars[names->parameters[i]] = V;

    ++i;
  }
}

void ClastStmtCodeGen::codegen(const clast_root *r) {
  addParameters(r->names);

  parallelCodeGeneration = false;

  const clast_stmt *stmt = (const clast_stmt *)r;
  if (stmt->next)
    codegen(stmt->next);
}

ClastStmtCodeGen::ClastStmtCodeGen(Scop *scop, IRBuilder<> &B, Pass *P)
    : S(scop), P(P), Builder(B), ExpGen(Builder, ClastVars) {}

namespace {
class CodeGeneration : public ScopPass {
  std::vector<std::string> ParallelLoops;

public:
  static char ID;

  CodeGeneration() : ScopPass(ID) {}

  bool runOnScop(Scop &S) {
    ParallelLoops.clear();

    assert(S.getRegion().isSimple() && "Only simple regions are supported");

    BasicBlock *StartBlock = executeScopConditionally(S, this);

    IRBuilder<> Builder(StartBlock->begin());

    ClastStmtCodeGen CodeGen(&S, Builder, this);
    CloogInfo &C = getAnalysis<CloogInfo>();
    CodeGen.codegen(C.getClast());

    ParallelLoops.insert(ParallelLoops.begin(),
                         CodeGen.getParallelLoops().begin(),
                         CodeGen.getParallelLoops().end());
    return true;
  }

  virtual void printScop(raw_ostream &OS) const {
    for (std::vector<std::string>::const_iterator PI = ParallelLoops.begin(),
                                                  PE = ParallelLoops.end();
         PI != PE; ++PI)
      OS << "Parallel loop with iterator '" << *PI << "' generated\n";
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<CloogInfo>();
    AU.addRequired<Dependences>();
    AU.addRequired<DominatorTree>();
    AU.addRequired<RegionInfo>();
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<ScopDetection>();
    AU.addRequired<ScopInfo>();
    AU.addRequired<DataLayout>();
    AU.addRequired<LoopInfo>();

    AU.addPreserved<CloogInfo>();
    AU.addPreserved<Dependences>();

    // FIXME: We do not create LoopInfo for the newly generated loops.
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
    AU.addPreserved<ScopDetection>();
    AU.addPreserved<ScalarEvolution>();

    // FIXME: We do not yet add regions for the newly generated code to the
    // region tree.
    AU.addPreserved<RegionInfo>();
    AU.addPreserved<TempScopInfo>();
    AU.addPreserved<ScopInfo>();
    AU.addPreservedID(IndependentBlocksID);
  }
};
}

char CodeGeneration::ID = 1;

Pass *polly::createCodeGenerationPass() { return new CodeGeneration(); }

INITIALIZE_PASS_BEGIN(CodeGeneration, "polly-codegen",
                      "Polly - Create LLVM-IR from SCoPs", false, false);
INITIALIZE_PASS_DEPENDENCY(CloogInfo);
INITIALIZE_PASS_DEPENDENCY(Dependences);
INITIALIZE_PASS_DEPENDENCY(DominatorTree);
INITIALIZE_PASS_DEPENDENCY(RegionInfo);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution);
INITIALIZE_PASS_DEPENDENCY(ScopDetection);
INITIALIZE_PASS_DEPENDENCY(DataLayout);
INITIALIZE_PASS_END(CodeGeneration, "polly-codegen",
                    "Polly - Create LLVM-IR from SCoPs", false, false)

#endif // CLOOG_FOUND