//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslExprBuilder.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/aff.h"
#include "isl/ast.h"
#include "isl/ast_build.h"
#include "isl/set.h"
#include <deque>

using namespace llvm;
using namespace polly;

static cl::opt<bool> Aligned("enable-polly-aligned",
                             cl::desc("Assume aligned memory accesses."),
                             cl::Hidden, cl::init(false), cl::ZeroOrMore,
                             cl::cat(PollyCategory));

bool polly::canSynthesize(const Instruction *I, const llvm::LoopInfo *LI,
                          ScalarEvolution *SE, const Region *R) {
  if (!I || !SE->isSCEVable(I->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEV(const_cast<Instruction *>(I)))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, R))
        return true;

  return false;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return true;
    default:
      break;
    }
  }
  return false;
}

BlockGenerator::BlockGenerator(PollyIRBuilder &B, LoopInfo &LI,
                               ScalarEvolution &SE, DominatorTree &DT,
                               ScalarAllocaMapTy &ScalarMap,
                               ScalarAllocaMapTy &PHIOpMap,
                               EscapeUsersAllocaMapTy &EscapeMap,
                               IslExprBuilder *ExprBuilder)
    : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
      EntryBB(nullptr), PHIOpMap(PHIOpMap), ScalarMap(ScalarMap),
      EscapeMap(EscapeMap) {}

Value *BlockGenerator::getNewValue(ScopStmt &Stmt, const Value *Old,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS, Loop *L) const {
  // We assume constants never change.
  // This avoids map lookups for many calls to this function.
  if (isa<Constant>(Old))
    return const_cast<Value *>(Old);

  if (Value *New = GlobalMap.lookup(Old)) {
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  }

  if (Value *New = BBMap.lookup(Old))
    return New;

  if (SE.isSCEVable(Old->getType()))
    if (const SCEV *Scev = SE.getSCEVAtScope(const_cast<Value *>(Old), L)) {
      if (!isa<SCEVCouldNotCompute>(Scev)) {
        const SCEV *NewScev = apply(Scev, LTS, SE);
        ValueToValueMap VTV;
        VTV.insert(BBMap.begin(), BBMap.end());
        VTV.insert(GlobalMap.begin(), GlobalMap.end());
        NewScev = SCEVParameterRewriter::rewrite(NewScev, SE, VTV);
        SCEVExpander Expander(SE, Stmt.getParent()
                                      ->getRegion()
                                      .getEntry()
                                      ->getParent()
                                      ->getParent()
                                      ->getDataLayout(),
                              "polly");
        Value *Expanded = Expander.expandCodeFor(NewScev, Old->getType(),
                                                 Builder.GetInsertPoint());

        BBMap[Old] = Expanded;
        return Expanded;
      }
    }

  // A scop-constant value defined by a global or a function parameter.
  if (isa<GlobalValue>(Old) || isa<Argument>(Old))
    return const_cast<Value *>(Old);

  // A scop-constant value defined by an instruction executed outside the scop.
  if (const Instruction *Inst = dyn_cast<Instruction>(Old))
    if (!Stmt.getParent()->getRegion().contains(Inst->getParent()))
      return const_cast<Value *>(Old);

  // The scalar dependence is neither available nor SCEVCodegenable.
  llvm_unreachable("Unexpected scalar dependence in region!");
  return nullptr;
}

void BlockGenerator::copyInstScalar(ScopStmt &Stmt, const Instruction *Inst,
                                    ValueMapT &BBMap, ValueMapT &GlobalMap,
                                    LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. At the moment, they just crash the code generation
  // as the metadata operands are not copied correctly.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();

  // Replace old operands with the new ones.
  for (Value *OldOperand : Inst->operands()) {
    Value *NewOperand = getNewValue(Stmt, OldOperand, BBMap, GlobalMap, LTS,
                                    getLoopForInst(Inst));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      delete NewInst;
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

Value *BlockGenerator::getNewAccessOperand(ScopStmt &Stmt,
                                           const MemoryAccess &MA) {
  isl_pw_multi_aff *PWAccRel;
  isl_union_map *Schedule;
  isl_ast_expr *Expr;
  isl_ast_build *Build = Stmt.getAstBuild();

  assert(ExprBuilder && Build &&
         "Cannot generate new value without IslExprBuilder!");

  Schedule = isl_ast_build_get_schedule(Build);
  PWAccRel = MA.applyScheduleToAccessRelation(Schedule);

  Expr = isl_ast_build_access_from_pw_multi_aff(Build, PWAccRel);
  Expr = isl_ast_expr_address_of(Expr);

  return ExprBuilder->create(Expr);
}

Value *BlockGenerator::generateLocationAccessed(
    ScopStmt &Stmt, const Instruction *Inst, const Value *Pointer,
    ValueMapT &BBMap, ValueMapT &GlobalMap, LoopToScevMapT &LTS) {
  const MemoryAccess &MA = Stmt.getAccessFor(Inst);

  Value *NewPointer;
  if (MA.hasNewAccessRelation())
    NewPointer = getNewAccessOperand(Stmt, MA);
  else
    NewPointer =
        getNewValue(Stmt, Pointer, BBMap, GlobalMap, LTS, getLoopForInst(Inst));

  return NewPointer;
}

Loop *BlockGenerator::getLoopForInst(const llvm::Instruction *Inst) {
  return LI.getLoopFor(Inst->getParent());
}

Value *BlockGenerator::generateScalarLoad(ScopStmt &Stmt, const LoadInst *Load,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap,
                                          LoopToScevMapT &LTS) {
  const Value *Pointer = Load->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, Pointer, BBMap, GlobalMap, LTS);
  Value *ScalarLoad = Builder.CreateAlignedLoad(
      NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_");
  return ScalarLoad;
}

Value *BlockGenerator::generateScalarStore(ScopStmt &Stmt,
                                           const StoreInst *Store,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  const Value *Pointer = Store->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Store, Pointer, BBMap, GlobalMap, LTS);
  Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
                                    GlobalMap, LTS, getLoopForInst(Store));

  Value *NewStore = Builder.CreateAlignedStore(ValueOperand, NewPointer,
                                               Store->getAlignment());
  return NewStore;
}
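
// Copy a single instruction of a statement into the generated code. The
// dispatch order below is: first reload demoted scalar operands, skip
// terminators (control flow is generated separately from the schedule),
// synthesize values that scalar evolution can recompute instead of copying
// them, handle loads, stores and PHIs specially, skip the special intrinsics
// listed below, and fall back to a plain scalar copy for everything else.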
void BlockGenerator::copyInstruction(ScopStmt &Stmt, const Instruction *Inst,
                                     ValueMapT &BBMap, ValueMapT &GlobalMap,
                                     LoopToScevMapT &LTS) {

  // First check for possible scalar dependences for this instruction.
  generateScalarLoads(Stmt, Inst, BBMap);

  // Terminator instructions control the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  Loop *L = getLoopForInst(Inst);
  if ((Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
      canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion())) {
    Value *NewValue = getNewValue(Stmt, Inst, BBMap, GlobalMap, LTS, L);
    BBMap[Inst] = NewValue;
    return;
  }

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateScalarLoad(Stmt, Load, BBMap, GlobalMap, LTS);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
    Value *NewStore = generateScalarStore(Stmt, Store, BBMap, GlobalMap, LTS);
    // Compute NewStore before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Store] = NewStore;
    return;
  }

  if (const PHINode *PHI = dyn_cast<PHINode>(Inst)) {
    copyPHIInstruction(Stmt, PHI, BBMap, GlobalMap, LTS);
    return;
  }

  // Skip some special intrinsics for which we do not adjust the semantics to
  // the new schedule. All others are handled like every other instruction.
  if (auto *IT = dyn_cast<IntrinsicInst>(Inst)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return;
    default:
      // Other intrinsics are copied.
      break;
    }
  }

  copyInstScalar(Stmt, Inst, BBMap, GlobalMap, LTS);
}

void BlockGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                              LoopToScevMapT &LTS) {
  assert(Stmt.isBlockStmt() &&
         "Only block statements can be copied by the block generator");

  ValueMapT BBMap;

  BasicBlock *BB = Stmt.getBasicBlock();
  copyBB(Stmt, BB, BBMap, GlobalMap, LTS);
}

BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  return CopyBB;
}

BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS) {
  BasicBlock *CopyBB = splitBB(BB);
  copyBB(Stmt, BB, CopyBB, BBMap, GlobalMap, LTS);
  return CopyBB;
}

void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
                            ValueMapT &BBMap, ValueMapT &GlobalMap,
                            LoopToScevMapT &LTS) {
  Builder.SetInsertPoint(CopyBB->begin());
  EntryBB = &CopyBB->getParent()->getEntryBlock();

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, BBMap, GlobalMap, LTS);

  // After a basic block was copied store all scalars that escape this block
  // in their alloca. First the scalars that have dependences inside the SCoP,
  // then the ones that might escape the SCoP.
  generateScalarStores(Stmt, BB, BBMap, GlobalMap);

  const Region &R = Stmt.getParent()->getRegion();
  for (Instruction &Inst : *BB)
    handleOutsideUsers(R, &Inst, BBMap[&Inst]);
}

AllocaInst *BlockGenerator::getOrCreateAlloca(Instruction *ScalarBase,
                                              ScalarAllocaMapTy &Map,
                                              const char *NameExt,
                                              bool *IsNew) {

  // Check if an alloca was cached for the base instruction.
  AllocaInst *&Addr = Map[ScalarBase];

  // If needed, indicate whether it was found already or will be created.
  if (IsNew)
    *IsNew = (Addr == nullptr);

  // If no alloca was found, create one and insert it in the entry block.
  if (!Addr) {
    auto *Ty = ScalarBase->getType();
    Addr = new AllocaInst(Ty, ScalarBase->getName() + NameExt);
    Addr->insertBefore(EntryBB->getFirstInsertionPt());
  }

  return Addr;
}

void BlockGenerator::handleOutsideUsers(const Region &R, Instruction *Inst,
                                        Value *InstCopy) {
  BasicBlock *ExitBB = R.getExit();

  EscapeUserVectorTy EscapeUsers;
  for (User *U : Inst->users()) {

    // A non-instruction user will never escape.
    Instruction *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;

    if (R.contains(UI) && ExitBB != UI->getParent())
      continue;

    EscapeUsers.push_back(UI);
  }

  // Exit if no escape uses were found.
  if (EscapeUsers.empty())
    return;

  // If there are escape users we get the alloca for this instruction and put
  // it in the EscapeMap for later finalization. However, if the alloca was not
  // created by an already handled scalar dependence we also have to initialize
  // it. Lastly, if the instruction was copied multiple times we already did
  // this and can exit.
  if (EscapeMap.count(Inst))
    return;

  // Get or create an escape alloca for this instruction.
  bool IsNew;
  AllocaInst *ScalarAddr =
      getOrCreateAlloca(Inst, ScalarMap, ".escape", &IsNew);

  // Remember that this instruction has escape uses and the escape alloca.
  EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));

  // If the escape alloca was just created store the instruction in there,
  // otherwise that happened already.
  if (IsNew) {
    assert(InstCopy &&
           "Except for PHIs, every instruction should have a copy!");
    Builder.CreateStore(InstCopy, ScalarAddr);
  }
}

void BlockGenerator::generateScalarLoads(ScopStmt &Stmt,
                                         const Instruction *Inst,
                                         ValueMapT &BBMap) {
  auto *MAL = Stmt.lookupAccessesFor(Inst);

  if (!MAL)
    return;

  for (MemoryAccess &MA : *MAL) {
    AllocaInst *Address;
    if (!MA.isScalar() || !MA.isRead())
      continue;

    auto Base = cast<Instruction>(MA.getBaseAddr());

    // This is either a common scalar use (second case) or the use of a phi
    // operand by the PHI node (first case).
    if (isa<PHINode>(Base) && Base == MA.getAccessInstruction())
      Address = getOrCreateAlloca(Base, PHIOpMap, ".phiops");
    else
      Address = getOrCreateAlloca(Base, ScalarMap, ".s2a");

    BBMap[Base] = Builder.CreateLoad(Address, Address->getName() + ".reload");
  }
}

Value *BlockGenerator::getNewScalarValue(Value *ScalarValue, const Region &R,
                                         ScalarAllocaMapTy &ReloadMap,
                                         ValueMapT &BBMap,
                                         ValueMapT &GlobalMap) {
  // If the value we want to store is an instruction we might have demoted it
  // in order to make it accessible here. In such a case a reload is
  // necessary. If it is not an instruction it will always be a value that
  // dominates the current point and we can just use it. In total there are 4
  // options:
  // (1) The value is not an instruction ==> use the value.
  // (2) The value is an instruction that was split out of the region prior to
  //     code generation ==> use the instruction as it dominates the region.
  // (3) The value is an instruction:
  //     (a) The value was defined in the current block, thus a copy is in
  //         the BBMap ==> use the mapped value.
  //     (b) The value was defined in a previous block, thus we demoted it
  //         earlier ==> use the reloaded value.
  Instruction *ScalarValueInst = dyn_cast<Instruction>(ScalarValue);
  if (!ScalarValueInst)
    return ScalarValue;

  if (!R.contains(ScalarValueInst)) {
    if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst))
      return /* Case (3a) */ ScalarValueCopy;
    else
      return /* Case 2 */ ScalarValue;
  }

  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
    return /* Case (3a) */ ScalarValueCopy;

  // Case (3b)
  assert(ReloadMap.count(ScalarValueInst) &&
         "ScalarInst not mapped in the block and not in the given reload map!");
  Value *ReloadAddr = ReloadMap[ScalarValueInst];
  ScalarValue =
      Builder.CreateLoad(ReloadAddr, ReloadAddr->getName() + ".reload");

  return ScalarValue;
}

void BlockGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.isBlockStmt() && BB == Stmt.getBasicBlock() &&
         "Region statements need to use the generateScalarStores() "
         "function in the RegionGenerator");

  // Set to remember a store to the phiops alloca of a PHINode. It is needed as
  // we might have multiple write accesses to the same PHI and while one is the
  // self write of the PHI (to the ScalarMap alloca) the other is the write to
  // the operand alloca (PHIOpMap).
  SmallPtrSet<PHINode *, 4> SeenPHIs;

  // Iterate over all accesses in the given statement.
  for (MemoryAccess *MA : Stmt) {

    // Skip non-scalar and read accesses.
    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *ScalarBase = cast<Instruction>(MA->getBaseAddr());
    Instruction *ScalarInst = MA->getAccessInstruction();
    PHINode *ScalarBasePHI = dyn_cast<PHINode>(ScalarBase);

    // Get the alloca node for the base instruction and the value we want to
    // store. In total there are 4 options:
    // (1) The base is not a PHI, hence it is a simple scalar def-use chain.
    // (2) The base is a PHI,
    //     (a) and the write is caused by an operand in the block.
    //     (b) and it is the PHI self write (same as case (1)).
    //     (c) (2a) and (2b) are not distinguishable.
    // For case (1) and (2b) we get the alloca from the scalar map and the
    // value we want to store is initialized with the instruction attached to
    // the memory access. For case (2a) we get the alloca from the PHI operand
    // map and the value we want to store is initialized with the incoming
    // value for this block. The tricky case (2c) is when both (2a) and (2b)
    // match. This happens if the PHI operand is in the same block as the PHI.
    // To handle that we choose the alloca of (2a) first and (2b) for the next
    // write access to that PHI (there must be 2).
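    // For a PHI %x with scalar dependences two different allocas may thus be
    // written here (%x is only illustrative; the suffixes match the maps used
    // below): the ".phiops" alloca receives the incoming value this block
    // contributes to %x, while the ".s2a" alloca receives the value of %x
    // itself so that later blocks can reload it.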
    Value *ScalarValue = nullptr;
    AllocaInst *ScalarAddr = nullptr;

    if (!ScalarBasePHI) {
      // Case (1)
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      ScalarValue = ScalarInst;
    } else {
      int PHIIdx = ScalarBasePHI->getBasicBlockIndex(BB);
      if (ScalarBasePHI != ScalarInst) {
        // Case (2a)
        assert(PHIIdx >= 0 && "Bad scalar write to PHI operand");
        SeenPHIs.insert(ScalarBasePHI);
        ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
        ScalarValue = ScalarBasePHI->getIncomingValue(PHIIdx);
      } else if (PHIIdx < 0) {
        // Case (2b)
        ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
        ScalarValue = ScalarInst;
      } else {
        // Case (2c)
        if (SeenPHIs.insert(ScalarBasePHI).second) {
          // First access ==> same as (2a)
          ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
          ScalarValue = ScalarBasePHI->getIncomingValue(PHIIdx);
        } else {
          // Second access ==> same as (2b)
          ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
          ScalarValue = ScalarInst;
        }
      }
    }

    ScalarValue =
        getNewScalarValue(ScalarValue, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(ScalarValue, ScalarAddr);
  }
}

void BlockGenerator::createScalarInitialization(Region &R,
                                                ValueMapT &GlobalMap) {
  // The split block __just before__ the original and the optimized region.
  BasicBlock *SplitBB = R.getEnteringBlock();
  BranchInst *SplitBBTerm = cast<BranchInst>(SplitBB->getTerminator());
  assert(SplitBBTerm->getNumSuccessors() == 2 && "Bad region entering block!");

  // Get the start block of the __optimized__ region.
  BasicBlock *StartBB = SplitBBTerm->getSuccessor(0);
  if (StartBB == R.getEntry())
    StartBB = SplitBBTerm->getSuccessor(1);

  // For each PHI predecessor outside the region store the incoming operand
  // value prior to entering the optimized region.
  Builder.SetInsertPoint(StartBB->getTerminator());

  ScalarAllocaMapTy EmptyMap;
  for (const auto &PHIOpMapping : PHIOpMap) {
    const PHINode *PHI = cast<PHINode>(PHIOpMapping.getFirst());

    // Check if this PHI has the split block as predecessor (that is the only
    // possible predecessor outside the SCoP).
    int idx = PHI->getBasicBlockIndex(SplitBB);
    if (idx < 0)
      continue;

    Value *ScalarValue = PHI->getIncomingValue(idx);
    ScalarValue =
        getNewScalarValue(ScalarValue, R, EmptyMap, GlobalMap, GlobalMap);

    // If the split block is the predecessor, initialize the PHI operand
    // alloca.
    Builder.CreateStore(ScalarValue, PHIOpMapping.getSecond());
  }
}
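
// Create the merge code that combines the original and the optimized version
// of the SCoP for every value that escapes the region: each demoted value is
// reloaded from its escape alloca at the end of the optimized region, a PHI
// in the merge block selects between that reload and the original
// instruction, and all escape users are redirected to the PHI.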
void BlockGenerator::createScalarFinalization(Region &R) {
  // The exit block of the __unoptimized__ region.
  BasicBlock *ExitBB = R.getExitingBlock();
  // The merge block __just after__ the region and the optimized region.
  BasicBlock *MergeBB = R.getExit();

  // The exit block of the __optimized__ region.
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));

  Builder.SetInsertPoint(OptExitBB->getTerminator());
  for (const auto &EscapeMapping : EscapeMap) {
    // Extract the escaping instruction and the escaping users as well as the
    // alloca the instruction was demoted to.
    Instruction *EscapeInst = EscapeMapping.getFirst();
    const auto &EscapeMappingValue = EscapeMapping.getSecond();
    const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
    AllocaInst *ScalarAddr = EscapeMappingValue.first;

    // Reload the demoted instruction in the optimized version of the SCoP.
    Instruction *EscapeInstReload =
        Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload");

    // Create the merge PHI that merges the optimized and unoptimized version.
    PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
                                        EscapeInst->getName() + ".merge");
    MergePHI->insertBefore(MergeBB->getFirstInsertionPt());

    // Add the respective values to the merge PHI.
    MergePHI->addIncoming(EscapeInstReload, OptExitBB);
    MergePHI->addIncoming(EscapeInst, ExitBB);

    // The information of scalar evolution about the escaping instruction needs
    // to be revoked so the new merged instruction will be used.
    if (SE.isSCEVable(EscapeInst->getType()))
      SE.forgetValue(EscapeInst);

    // Replace all uses of the demoted instruction with the merge PHI.
    for (Instruction *EUser : EscapeUsers)
      EUser->replaceUsesOfWith(EscapeInst, MergePHI);
  }
}

void BlockGenerator::finalizeSCoP(Scop &S, ValueMapT &GlobalMap) {
  createScalarInitialization(S.getRegion(), GlobalMap);
  createScalarFinalization(S.getRegion());
}

VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
                                           VectorValueMapT &GlobalMaps,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           isl_map *Schedule)
    : BlockGenerator(BlockGen), GlobalMaps(GlobalMaps), VLTS(VLTS),
      Schedule(Schedule) {
  assert(GlobalMaps.size() > 1 && "Only one vector lane found");
  assert(Schedule && "No statement domain provided");
}

Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, const Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], GlobalMaps[Lane],
                            VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}
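
// A stride-one access loads consecutive memory locations, so the per-lane
// scalar loads can be combined into a single wide load through a pointer
// bitcast to the corresponding vector type. For a stride of minus one the
// wide load is taken at the address accessed by the last lane and the loaded
// vector is then reversed with a shufflevector. Illustrative IR for a
// width-4 double access (names follow the suffixes used below, syntax
// abbreviated):
//   %vector_ptr      = bitcast double* %ptr to <4 x double>*
//   %val_p_vec_full  = load <4 x double>, <4 x double>* %vector_ptr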
Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps,
    bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = nullptr;
  NewPointer = generateLocationAccessed(Stmt, Load, Pointer, ScalarMaps[Offset],
                                        GlobalMaps[Offset], VLTS[Offset]);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

Value *VectorBlockGenerator::generateStrideZeroLoad(ScopStmt &Stmt,
                                                    const LoadInst *Load,
                                                    ValueMapT &BBMap) {
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer = generateLocationAccessed(Stmt, Load, Pointer, BBMap,
                                               GlobalMaps[0], VLTS[0]);
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

Value *VectorBlockGenerator::generateUnknownStrideLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      dyn_cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = generateLocationAccessed(
        Stmt, Load, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(ScopStmt &Stmt, const LoadInst *Load,
                                        ValueMapT &VectorMap,
                                        VectorValueMapT &ScalarMaps) {
  if (!VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateScalarLoad(Stmt, Load, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    return;
  }

  const MemoryAccess &Access = Stmt.getAccessFor(Load);

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Load, VectorMap, ScalarMaps);

  Value *NewLoad;
  if (Access.isStrideZero(isl_map_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0]);
  else if (Access.isStrideOne(isl_map_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps);
  else if (Access.isStrideX(isl_map_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, true);
  else
    NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps);

  VectorMap[Load] = NewLoad;
}

void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt,
                                         const UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForInst(Inst));

  assert(isa<CastInst>(Inst) && "Can not generate vector code for instruction");

  const CastInst *Cast = dyn_cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
}

void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt,
                                          const BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForInst(Inst);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(ScopStmt &Stmt, const StoreInst *Store,
                                     ValueMapT &VectorMap,
                                     VectorValueMapT &ScalarMaps) {
  const MemoryAccess &Access = Stmt.getAccessFor(Store);

  const Value *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForInst(Store));

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Store, VectorMap, ScalarMaps);

  if (Access.isStrideOne(isl_map_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth());
    Value *NewPointer = generateLocationAccessed(
        Stmt, Store, Pointer, ScalarMaps[0], GlobalMaps[0], VLTS[0]);

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    StoreInst *Store = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      Store->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
      Value *NewPointer = generateLocationAccessed(
          Stmt, Store, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Value *Operand : Inst->operands())
    if (VectorMap.count(Operand))
      return true;
  return false;
}

bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Value *Operand : Inst->operands()) {
    ValueMapT::iterator VecOp = VectorMap.find(Operand);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If there is one scalar extracted, all scalar elements should have
      // already been extracted by the code here. So no need to check for the
      // existence of all of them.
      if (SM.count(Operand))
        break;

      SM[Operand] =
          Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(ScopStmt &Stmt,
                                              const Instruction *Inst,
                                              ValueMapT &VectorMap,
                                              VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand;
  int VectorWidth = getVectorWidth();

  HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < getVectorWidth(); VectorLane++)
    BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
                                    GlobalMaps[VectorLane], VLTS[VectorLane]);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

int VectorBlockGenerator::getVectorWidth() { return GlobalMaps.size(); }

void VectorBlockGenerator::copyInstruction(ScopStmt &Stmt,
                                           const Instruction *Inst,
                                           ValueMapT &VectorMap,
                                           VectorValueMapT &ScalarMaps) {
  // Terminator instructions control the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion()))
    return;

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    generateLoad(Stmt, Load, VectorMap, ScalarMaps);
    return;
  }

  if (hasVectorOperands(Inst, VectorMap)) {
    if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
      copyStore(Stmt, Store, VectorMap, ScalarMaps);
      return;
    }

    if (const UnaryInstruction *Unary = dyn_cast<UnaryInstruction>(Inst)) {
      copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
      return;
    }

    if (const BinaryOperator *Binary = dyn_cast<BinaryOperator>(Inst)) {
      copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
      return;
    }

    // Fallthrough: We generate scalar instructions, if we don't know how to
    // generate vector code.
  }

  copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps);
}

void VectorBlockGenerator::copyStmt(ScopStmt &Stmt) {
  assert(Stmt.isBlockStmt() && "TODO: Only block statements can be copied by "
                               "the vector block generator");

  BasicBlock *BB = Stmt.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported there is one map for scalar values
  // and one for vector values.
  //
  // In case we just do scalar code generation, the vectorMap is not used and
  // the scalarMap has just one dimension, which contains the mapping.
  //
  // In case vector code generation is done, an instruction may either appear
  // in the vector map once (as it is calculating >vectorwidth< values at a
  // time) or, if the values are calculated using scalar operations, it
  // appears once in every dimension of the scalarMap.
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap);
}

BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
                                             BasicBlock *BBCopy) {

  BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
  BasicBlock *BBCopyIDom = BlockMap.lookup(BBIDom);

  if (BBCopyIDom)
    DT.changeImmediateDominator(BBCopy, BBCopyIDom);

  return BBCopyIDom;
}
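
// Copy a non-affine region statement. The blocks of the region are visited
// in a breadth-first order; every copied block gets its own value map that is
// seeded from the map of its immediate dominator, PHI operands whose incoming
// block has not been copied yet are recorded as incomplete and filled in once
// that block is generated, and the branches connecting the copied blocks are
// recreated in a second pass after all blocks exist.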
void RegionGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                               LoopToScevMapT &LTS) {
  assert(Stmt.isRegionStmt() &&
         "Only region statements can be copied by the block generator");

  // Forget all old mappings.
  BlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();

  // The region represented by the statement.
  Region *R = Stmt.getRegion();

  // Create a dedicated entry for the region where we can reload all demoted
  // inputs.
  BasicBlock *EntryBB = R->getEntry();
  BasicBlock *EntryBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
  Builder.SetInsertPoint(EntryBBCopy->begin());

  for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
    if (!R->contains(*PI))
      BlockMap[*PI] = EntryBBCopy;

  // Iterate over all blocks in the region in a breadth-first search.
  std::deque<BasicBlock *> Blocks;
  SmallPtrSet<BasicBlock *, 8> SeenBlocks;
  Blocks.push_back(EntryBB);
  SeenBlocks.insert(EntryBB);

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.front();
    Blocks.pop_front();

    // First split the block and update dominance information.
    BasicBlock *BBCopy = splitBB(BB);
    BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);

    // In order to remap PHI nodes we also store basic block mappings.
    BlockMap[BB] = BBCopy;

    // Get the mapping for this block and initialize it with the mapping
    // available at its immediate dominator (in the new region).
    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap = RegionMaps[BBCopyIDom];

    // Copy the block with the BlockGenerator.
    copyBB(Stmt, BB, BBCopy, RegionMap, GlobalMap, LTS);

    // Add values to incomplete PHI nodes waiting for this block to be copied.
    for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
      addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB,
                      GlobalMap, LTS);
    IncompletePHINodeMap[BB].clear();

    // And continue with new successors inside the region.
    for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
      if (R->contains(*SI) && SeenBlocks.insert(*SI).second)
        Blocks.push_back(*SI);
  }

  // Now create a new dedicated region exit block and add it to the region map.
  BasicBlock *ExitBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
  BlockMap[R->getExit()] = ExitBBCopy;

  repairDominance(R->getExit(), ExitBBCopy);

  // As the block generator doesn't handle control flow we need to add the
  // region control flow by hand after all blocks have been copied.
  for (BasicBlock *BB : SeenBlocks) {

    BranchInst *BI = cast<BranchInst>(BB->getTerminator());

    BasicBlock *BBCopy = BlockMap[BB];
    Instruction *BICopy = BBCopy->getTerminator();

    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap.insert(BlockMap.begin(), BlockMap.end());

    Builder.SetInsertPoint(BBCopy);
    copyInstScalar(Stmt, BI, RegionMap, GlobalMap, LTS);
    BICopy->eraseFromParent();
  }

  // Add counting PHI nodes to all loops in the region that can be used as
  // replacement for SCEVs referring to the old loop.
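  // Conceptually, every copied loop header then contains:
  //   %polly.subregion.iv     = phi i32 [ 0, <preds outside L> ],
  //                                     [ %polly.subregion.iv.inc, <latch> ]
  //   %polly.subregion.iv.inc = add i32 %polly.subregion.iv, 1
  // and LTS[L] is set to this counter so that SCEV expressions based on the
  // original induction variable can be expanded in terms of the copy.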
  for (BasicBlock *BB : SeenBlocks) {
    Loop *L = LI.getLoopFor(BB);
    if (L == nullptr || L->getHeader() != BB)
      continue;

    BasicBlock *BBCopy = BlockMap[BB];
    Value *NullVal = Builder.getInt32(0);
    PHINode *LoopPHI =
        PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
    Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
        LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
    LoopPHI->insertBefore(BBCopy->begin());
    LoopPHIInc->insertBefore(BBCopy->getTerminator());

    for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
      if (!R->contains(PredBB))
        continue;
      if (L->contains(PredBB))
        LoopPHI->addIncoming(LoopPHIInc, BlockMap[PredBB]);
      else
        LoopPHI->addIncoming(NullVal, BlockMap[PredBB]);
    }

    for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
      if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
        LoopPHI->addIncoming(NullVal, PredBBCopy);

    LTS[L] = SE.getUnknown(LoopPHI);
  }

  // Add all mappings from the region to the global map so outside uses will
  // use the copied instructions.
  for (auto &BBMap : RegionMaps)
    GlobalMap.insert(BBMap.second.begin(), BBMap.second.end());

  // Reset the old insert point for the build.
  Builder.SetInsertPoint(ExitBBCopy->begin());
}

void RegionGenerator::generateScalarLoads(ScopStmt &Stmt,
                                          const Instruction *Inst,
                                          ValueMapT &BBMap) {

  // Inside a non-affine region PHI nodes are copied, not demoted. Once the
  // PHI is copied it will reload all inputs from outside the region, hence
  // we do not need to generate code for the read access of the operands of a
  // PHI.
  if (isa<PHINode>(Inst))
    return;

  return BlockGenerator::generateScalarLoads(Stmt, Inst, BBMap);
}

void RegionGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  Region *StmtR = Stmt.getRegion();
  assert(StmtR && "Block statements need to use the generateScalarStores() "
                  "function in the BlockGenerator");

  BasicBlock *ExitBB = StmtR->getExit();

  // For region statements three kinds of scalar stores exist:
  // (1) A definition used by a non-phi instruction outside the region.
  // (2) A phi-instruction in the region entry.
  // (3) A write to a phi instruction in the region exit.
  // The last case is the tricky one since we do not know anymore which
  // predecessor of the exit needs to store the operand value that doesn't
  // have a definition in the region. Therefore, we have to check in each
  // block in the region if we should store the value or not.

  // Iterate over all accesses in the given statement.
  for (MemoryAccess *MA : Stmt) {

    // Skip non-scalar and read accesses.
    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *ScalarBase = cast<Instruction>(MA->getBaseAddr());
    Instruction *ScalarInst = MA->getAccessInstruction();
    PHINode *ScalarBasePHI = dyn_cast<PHINode>(ScalarBase);

    Value *ScalarValue = nullptr;
    AllocaInst *ScalarAddr = nullptr;

    if (!ScalarBasePHI) {
      // Case (1)
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      ScalarValue = ScalarInst;
    } else if (ScalarBasePHI->getParent() != ExitBB) {
      // Case (2)
      assert(ScalarBasePHI->getParent() == StmtR->getEntry() &&
             "Bad PHI self write in non-affine region");
      assert(ScalarBase == ScalarInst &&
             "Bad PHI self write in non-affine region");
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      ScalarValue = ScalarInst;
    } else {
      int PHIIdx = ScalarBasePHI->getBasicBlockIndex(BB);
      // Skip accesses we will not handle in this basic block but in another
      // one in the statement region.
      if (PHIIdx < 0)
        continue;

      // Case (3)
      ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
      ScalarValue = ScalarBasePHI->getIncomingValue(PHIIdx);
    }

    ScalarValue =
        getNewScalarValue(ScalarValue, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(ScalarValue, ScalarAddr);
  }
}

void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI,
                                      PHINode *PHICopy, BasicBlock *IncomingBB,
                                      ValueMapT &GlobalMap,
                                      LoopToScevMapT &LTS) {
  Region *StmtR = Stmt.getRegion();

  // If the incoming block was not yet copied, mark this PHI as incomplete.
  // Once the block is copied the incoming value will be added.
  BasicBlock *BBCopy = BlockMap[IncomingBB];
  if (!BBCopy) {
    assert(StmtR->contains(IncomingBB) &&
           "Bad incoming block for PHI in non-affine region");
    IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
    return;
  }

  Value *OpCopy = nullptr;
  if (StmtR->contains(IncomingBB)) {
    assert(RegionMaps.count(BBCopy) &&
           "Incoming PHI block did not have a BBMap");
    ValueMapT &BBCopyMap = RegionMaps[BBCopy];

    Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
    OpCopy =
        getNewValue(Stmt, Op, BBCopyMap, GlobalMap, LTS, getLoopForInst(PHI));
  } else {

    if (PHICopy->getBasicBlockIndex(BBCopy) >= 0)
      return;

    AllocaInst *PHIOpAddr =
        getOrCreateAlloca(const_cast<PHINode *>(PHI), PHIOpMap, ".phiops");
    OpCopy = new LoadInst(PHIOpAddr, PHIOpAddr->getName() + ".reload",
                          BlockMap[IncomingBB]->getTerminator());
  }

  assert(OpCopy && "Incoming PHI value was not copied properly");
  assert(BBCopy && "Incoming PHI block was not copied properly");
  PHICopy->addIncoming(OpCopy, BBCopy);
}

Value *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, const PHINode *PHI,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  unsigned NumIncoming = PHI->getNumIncomingValues();
  PHINode *PHICopy =
      Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
  PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
  BBMap[PHI] = PHICopy;

  for (unsigned u = 0; u < NumIncoming; u++)
    addOperandToPHI(Stmt, PHI, PHICopy, PHI->getIncomingBlock(u), GlobalMap,
                    LTS);
  return PHICopy;
}