//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslExprBuilder.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/aff.h"
#include "isl/ast.h"
#include "isl/ast_build.h"
#include "isl/set.h"
#include <deque>

using namespace llvm;
using namespace polly;

static cl::opt<bool> Aligned("enable-polly-aligned",
                             cl::desc("Assume aligned memory accesses."),
                             cl::Hidden, cl::init(false), cl::ZeroOrMore,
                             cl::cat(PollyCategory));

bool polly::canSynthesize(const Instruction *I, const llvm::LoopInfo *LI,
                          ScalarEvolution *SE, const Region *R) {
  if (!I || !SE->isSCEVable(I->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEV(const_cast<Instruction *>(I)))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, R))
        return true;

  return false;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return true;
    default:
      break;
    }
  }
  return false;
}

BlockGenerator::BlockGenerator(PollyIRBuilder &B, LoopInfo &LI,
                               ScalarEvolution &SE, DominatorTree &DT,
                               ScalarAllocaMapTy &ScalarMap,
                               ScalarAllocaMapTy &PHIOpMap,
                               EscapeUsersAllocaMapTy &EscapeMap,
                               IslExprBuilder *ExprBuilder)
    : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
      EntryBB(nullptr), PHIOpMap(PHIOpMap), ScalarMap(ScalarMap),
      EscapeMap(EscapeMap) {}

Value *BlockGenerator::getNewValue(ScopStmt &Stmt, const Value *Old,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS, Loop *L) const {
  // We assume constants never change.
  // This avoids map lookups for many calls to this function.
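  //
  // The lookup order below matters: a GlobalMap entry overrides everything
  // else, the block-local BBMap is consulted next, and only if neither map
  // knows the value do we try to synthesize it from its SCEV expression or
  // fall back to reusing a scop-constant value directly.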
  if (isa<Constant>(Old))
    return const_cast<Value *>(Old);

  if (Value *New = GlobalMap.lookup(Old)) {
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  }

  if (Value *New = BBMap.lookup(Old))
    return New;

  if (SE.isSCEVable(Old->getType()))
    if (const SCEV *Scev = SE.getSCEVAtScope(const_cast<Value *>(Old), L)) {
      if (!isa<SCEVCouldNotCompute>(Scev)) {
        const SCEV *NewScev = apply(Scev, LTS, SE);
        ValueToValueMap VTV;
        VTV.insert(BBMap.begin(), BBMap.end());
        VTV.insert(GlobalMap.begin(), GlobalMap.end());
        NewScev = SCEVParameterRewriter::rewrite(NewScev, SE, VTV);
        SCEVExpander Expander(SE, Stmt.getParent()
                                      ->getRegion()
                                      .getEntry()
                                      ->getParent()
                                      ->getParent()
                                      ->getDataLayout(),
                              "polly");
        Value *Expanded = Expander.expandCodeFor(NewScev, Old->getType(),
                                                 Builder.GetInsertPoint());

        BBMap[Old] = Expanded;
        return Expanded;
      }
    }

  // A scop-constant value defined by a global or a function parameter.
  if (isa<GlobalValue>(Old) || isa<Argument>(Old))
    return const_cast<Value *>(Old);

  // A scop-constant value defined by an instruction executed outside the scop.
  if (const Instruction *Inst = dyn_cast<Instruction>(Old))
    if (!Stmt.getParent()->getRegion().contains(Inst->getParent()))
      return const_cast<Value *>(Old);

  // The scalar dependence is neither available nor SCEVCodegenable.
  llvm_unreachable("Unexpected scalar dependence in region!");
  return nullptr;
}

void BlockGenerator::copyInstScalar(ScopStmt &Stmt, const Instruction *Inst,
                                    ValueMapT &BBMap, ValueMapT &GlobalMap,
                                    LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. At the moment, they just crash the code generation
  // as the metadata operands are not correctly copied.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();

  // Replace old operands with the new ones.
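  //
  // As an illustrative sketch (the value names are hypothetical), copying
  //   %mul = mul i64 %i, %n
  // yields
  //   %p_mul = mul i64 %p_i, %p_n
  // where %p_i and %p_n are the remapped operand values returned by
  // getNewValue() for this statement instance.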
  for (Value *OldOperand : Inst->operands()) {
    Value *NewOperand = getNewValue(Stmt, OldOperand, BBMap, GlobalMap, LTS,
                                    getLoopForInst(Inst));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      delete NewInst;
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

Value *BlockGenerator::getNewAccessOperand(ScopStmt &Stmt,
                                           const MemoryAccess &MA) {
  isl_pw_multi_aff *PWAccRel;
  isl_union_map *Schedule;
  isl_ast_expr *Expr;
  isl_ast_build *Build = Stmt.getAstBuild();

  assert(ExprBuilder && Build &&
         "Cannot generate new value without IslExprBuilder!");

  Schedule = isl_ast_build_get_schedule(Build);
  PWAccRel = MA.applyScheduleToAccessRelation(Schedule);

  Expr = isl_ast_build_access_from_pw_multi_aff(Build, PWAccRel);
  Expr = isl_ast_expr_address_of(Expr);

  return ExprBuilder->create(Expr);
}

Value *BlockGenerator::generateLocationAccessed(
    ScopStmt &Stmt, const Instruction *Inst, const Value *Pointer,
    ValueMapT &BBMap, ValueMapT &GlobalMap, LoopToScevMapT &LTS) {
  const MemoryAccess &MA = Stmt.getAccessFor(Inst);

  Value *NewPointer;
  if (MA.hasNewAccessRelation())
    NewPointer = getNewAccessOperand(Stmt, MA);
  else
    NewPointer =
        getNewValue(Stmt, Pointer, BBMap, GlobalMap, LTS, getLoopForInst(Inst));

  return NewPointer;
}

Loop *BlockGenerator::getLoopForInst(const llvm::Instruction *Inst) {
  return LI.getLoopFor(Inst->getParent());
}

Value *BlockGenerator::generateScalarLoad(ScopStmt &Stmt, const LoadInst *Load,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap,
                                          LoopToScevMapT &LTS) {
  const Value *Pointer = Load->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, Pointer, BBMap, GlobalMap, LTS);
  Value *ScalarLoad = Builder.CreateAlignedLoad(
      NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_");
  return ScalarLoad;
}

Value *BlockGenerator::generateScalarStore(ScopStmt &Stmt,
                                           const StoreInst *Store,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  const Value *Pointer = Store->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Store, Pointer, BBMap, GlobalMap, LTS);
  Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
                                    GlobalMap, LTS, getLoopForInst(Store));

  Value *NewStore = Builder.CreateAlignedStore(ValueOperand, NewPointer,
                                               Store->getAlignment());
  return NewStore;
}

void BlockGenerator::copyInstruction(ScopStmt &Stmt, const Instruction *Inst,
                                     ValueMapT &BBMap, ValueMapT &GlobalMap,
                                     LoopToScevMapT &LTS) {

  // First check for possible scalar dependences for this instruction.
  generateScalarLoads(Stmt, Inst, BBMap);

  // Terminator instructions control the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
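  // For example, the branch that terminates the statement's basic block is
  // recreated by the AST-driven control flow generation; copying it here
  // would introduce conflicting control flow.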
  if (Inst->isTerminator())
    return;

  Loop *L = getLoopForInst(Inst);
  if ((Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
      canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion())) {
    Value *NewValue = getNewValue(Stmt, Inst, BBMap, GlobalMap, LTS, L);
    BBMap[Inst] = NewValue;
    return;
  }

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateScalarLoad(Stmt, Load, BBMap, GlobalMap, LTS);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
    Value *NewStore = generateScalarStore(Stmt, Store, BBMap, GlobalMap, LTS);
    // Compute NewStore before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Store] = NewStore;
    return;
  }

  if (const PHINode *PHI = dyn_cast<PHINode>(Inst)) {
    copyPHIInstruction(Stmt, PHI, BBMap, GlobalMap, LTS);
    return;
  }

  // Skip some special intrinsics for which we do not adjust the semantics to
  // the new schedule. All others are handled like every other instruction.
  if (auto *IT = dyn_cast<IntrinsicInst>(Inst)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return;
    default:
      // Other intrinsics are copied.
      break;
    }
  }

  copyInstScalar(Stmt, Inst, BBMap, GlobalMap, LTS);
}

void BlockGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                              LoopToScevMapT &LTS) {
  assert(Stmt.isBlockStmt() &&
         "Only block statements can be copied by the block generator");

  ValueMapT BBMap;

  BasicBlock *BB = Stmt.getBasicBlock();
  copyBB(Stmt, BB, BBMap, GlobalMap, LTS);
}

BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  return CopyBB;
}

BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS) {
  BasicBlock *CopyBB = splitBB(BB);
  copyBB(Stmt, BB, CopyBB, BBMap, GlobalMap, LTS);
  return CopyBB;
}

void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
                            ValueMapT &BBMap, ValueMapT &GlobalMap,
                            LoopToScevMapT &LTS) {
  Builder.SetInsertPoint(CopyBB->begin());
  EntryBB = &CopyBB->getParent()->getEntryBlock();

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, BBMap, GlobalMap, LTS);

  // After a basic block was copied, store all scalars that escape this block
  // in their allocas: first the scalars that have dependences inside the
  // SCoP, then the ones that might escape the SCoP.
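  // A value "escapes" if it is used below the region exit, e.g., a value
  // computed in this statement that is read again after the scop. Such values
  // are written to a dedicated ".escape" alloca here and merged back into the
  // original code in createScalarFinalization().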
  generateScalarStores(Stmt, BB, BBMap, GlobalMap);

  const Region &R = Stmt.getParent()->getRegion();
  for (Instruction &Inst : *BB)
    handleOutsideUsers(R, &Inst, BBMap[&Inst]);
}

AllocaInst *BlockGenerator::getOrCreateAlloca(Instruction *ScalarBase,
                                              ScalarAllocaMapTy &Map,
                                              const char *NameExt,
                                              bool *IsNew) {

  // Check if an alloca was cached for the base instruction.
  AllocaInst *&Addr = Map[ScalarBase];

  // If requested, indicate whether it was found already or will be created.
  if (IsNew)
    *IsNew = (Addr == nullptr);

  // If no alloca was found, create one and insert it in the entry block.
  if (!Addr) {
    auto *Ty = ScalarBase->getType();
    Addr = new AllocaInst(Ty, ScalarBase->getName() + NameExt);
    Addr->insertBefore(EntryBB->getFirstInsertionPt());
  }

  return Addr;
}

void BlockGenerator::handleOutsideUsers(const Region &R, Instruction *Inst,
                                        Value *InstCopy) {
  BasicBlock *ExitBB = R.getExit();

  EscapeUserVectorTy EscapeUsers;
  for (User *U : Inst->users()) {

    // A non-instruction user will never escape.
    Instruction *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;

    if (R.contains(UI) && ExitBB != UI->getParent())
      continue;

    EscapeUsers.push_back(UI);
  }

  // Exit if no escape uses were found.
  if (EscapeUsers.empty())
    return;

  // If there are escape users, we get the alloca for this instruction and put
  // it in the EscapeMap for later finalization. However, if the alloca was
  // not created by an already handled scalar dependence, we also have to
  // initialize it. Lastly, if the instruction was copied multiple times we
  // already did this and can exit here.
  if (EscapeMap.count(Inst))
    return;

  // Get or create an escape alloca for this instruction.
  bool IsNew;
  AllocaInst *ScalarAddr =
      getOrCreateAlloca(Inst, ScalarMap, ".escape", &IsNew);

  // Remember that this instruction has escape uses and the escape alloca.
  EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));

  // If the escape alloca was just created, store the instruction in it;
  // otherwise that happened already.
  if (IsNew) {
    assert(InstCopy && "Except for PHIs, every instruction should have a copy!");
    Builder.CreateStore(InstCopy, ScalarAddr);
  }
}

void BlockGenerator::generateScalarLoads(ScopStmt &Stmt,
                                         const Instruction *Inst,
                                         ValueMapT &BBMap) {

  // Iterate over all memory accesses for the given instruction and handle all
  // scalar reads.
  if (ScopStmt::MemoryAccessList *MAL = Stmt.lookupAccessesFor(Inst)) {
    for (MemoryAccess &MA : *MAL) {
      if (!MA.isScalar() || !MA.isRead())
        continue;

      Instruction *ScalarBase = cast<Instruction>(MA.getBaseAddr());
      Instruction *ScalarInst = MA.getAccessInstruction();

      PHINode *ScalarBasePHI = dyn_cast<PHINode>(ScalarBase);

      // This is either a common scalar use (second case) or the use of a phi
      // operand by the PHI node (first case).
      if (ScalarBasePHI == ScalarInst) {
        AllocaInst *PHIOpAddr =
            getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
        LoadInst *LI =
            Builder.CreateLoad(PHIOpAddr, PHIOpAddr->getName() + ".reload");
        BBMap[ScalarBase] = LI;
      } else {
        // For non-PHI operand uses we look up the alloca in the ScalarMap,
        // reload it, and add the mapping to the ones in the current basic
        // block.
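        //
        // As an example (with hypothetical names), a PHI "%phi" reloads its
        // operands from "%phi.phiops", while an ordinary cross-block use of
        // "%val" is reloaded from "%val.s2a".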
        AllocaInst *ScalarAddr =
            getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
        LoadInst *LI =
            Builder.CreateLoad(ScalarAddr, ScalarAddr->getName() + ".reload");
        BBMap[ScalarBase] = LI;
      }
    }
  }
}

Value *BlockGenerator::getNewScalarValue(Value *ScalarValue, const Region &R,
                                         ScalarAllocaMapTy &ReloadMap,
                                         ValueMapT &BBMap,
                                         ValueMapT &GlobalMap) {
  // If the value we want to store is an instruction, we might have demoted it
  // in order to make it accessible here. In such a case a reload is
  // necessary. If it is not an instruction, it will always be a value that
  // dominates the current point and we can just use it. In total there are 4
  // options:
  // (1) The value is not an instruction ==> use the value.
  // (2) The value is an instruction that was split out of the region prior to
  //     code generation ==> use the instruction as it dominates the region.
  // (3) The value is an instruction:
  //     (a) The value was defined in the current block, thus a copy is in
  //         the BBMap ==> use the mapped value.
  //     (b) The value was defined in a previous block, thus we demoted it
  //         earlier ==> use the reloaded value.
  Instruction *ScalarValueInst = dyn_cast<Instruction>(ScalarValue);
  if (!ScalarValueInst)
    return ScalarValue;

  if (!R.contains(ScalarValueInst)) {
    if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst))
      return /* Case (3a) */ ScalarValueCopy;
    else
      return /* Case 2 */ ScalarValue;
  }

  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
    return /* Case (3a) */ ScalarValueCopy;

  // Case (3b)
  assert(ReloadMap.count(ScalarValueInst) &&
         "ScalarInst not mapped in the block and not in the given reload map!");
  Value *ReloadAddr = ReloadMap[ScalarValueInst];
  ScalarValue =
      Builder.CreateLoad(ReloadAddr, ReloadAddr->getName() + ".reload");

  return ScalarValue;
}

void BlockGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.isBlockStmt() && BB == Stmt.getBasicBlock() &&
         "Region statements need to use the generateScalarStores() "
         "function in the RegionGenerator");

  // Set to remember a store to the phiops alloca of a PHINode. It is needed
  // as we might have multiple write accesses to the same PHI and while one is
  // the self write of the PHI (to the ScalarMap alloca) the other is the
  // write to the operand alloca (PHIOpMap).
  SmallPtrSet<PHINode *, 4> SeenPHIs;

  // Iterate over all accesses in the given statement.
  for (MemoryAccess *MA : Stmt) {

    // Skip non-scalar and read accesses.
    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *ScalarBase = cast<Instruction>(MA->getBaseAddr());
    Instruction *ScalarInst = MA->getAccessInstruction();
    PHINode *ScalarBasePHI = dyn_cast<PHINode>(ScalarBase);

    // Get the alloca node for the base instruction and the value we want to
    // store. In total there are 4 options:
    // (1) The base is not a PHI, hence it is a simple scalar def-use chain.
    // (2) The base is a PHI,
    //     (a) and the write is caused by an operand in the block.
    //     (b) and it is the PHI self write (same as case (1)).
    //     (c) (2a) and (2b) are not distinguishable.
    // For case (1) and (2b) we get the alloca from the scalar map and the
    // value we want to store is initialized with the instruction attached to
    // the memory access. For case (2a) we get the alloca from the PHI operand
    // map and the value we want to store is initialized with the incoming
    // value for this block. The tricky case (2c) is when both (2a) and (2b)
    // match. This happens if the PHI operand is in the same block as the PHI.
    // To handle that we choose the alloca of (2a) first and (2b) for the next
    // write access to that PHI (there must be 2).
    Value *ScalarValue = nullptr;
    AllocaInst *ScalarAddr = nullptr;

    if (!ScalarBasePHI) {
      // Case (1)
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      ScalarValue = ScalarInst;
    } else {
      int PHIIdx = ScalarBasePHI->getBasicBlockIndex(BB);
      if (ScalarBasePHI != ScalarInst) {
        // Case (2a)
        assert(PHIIdx >= 0 && "Bad scalar write to PHI operand");
        SeenPHIs.insert(ScalarBasePHI);
        ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
        ScalarValue = ScalarBasePHI->getIncomingValue(PHIIdx);
      } else if (PHIIdx < 0) {
        // Case (2b)
        ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
        ScalarValue = ScalarInst;
      } else {
        // Case (2c)
        if (SeenPHIs.insert(ScalarBasePHI).second) {
          // First access ==> same as (2a)
          ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
          ScalarValue = ScalarBasePHI->getIncomingValue(PHIIdx);
        } else {
          // Second access ==> same as (2b)
          ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
          ScalarValue = ScalarInst;
        }
      }
    }

    ScalarValue =
        getNewScalarValue(ScalarValue, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(ScalarValue, ScalarAddr);
  }
}

void BlockGenerator::createScalarInitialization(Region &R,
                                                ValueMapT &GlobalMap) {
  // The split block __just before__ the region and the optimized region.
  BasicBlock *SplitBB = R.getEnteringBlock();
  BranchInst *SplitBBTerm = cast<BranchInst>(SplitBB->getTerminator());
  assert(SplitBBTerm->getNumSuccessors() == 2 && "Bad region entering block!");

  // Get the start block of the __optimized__ region.
  BasicBlock *StartBB = SplitBBTerm->getSuccessor(0);
  if (StartBB == R.getEntry())
    StartBB = SplitBBTerm->getSuccessor(1);

  // For each PHI predecessor outside the region, store the incoming operand
  // value prior to entering the optimized region.
  Builder.SetInsertPoint(StartBB->getTerminator());

  ScalarAllocaMapTy EmptyMap;
  for (const auto &PHIOpMapping : PHIOpMap) {
    const PHINode *PHI = cast<PHINode>(PHIOpMapping.getFirst());

    // Check if this PHI has the split block as predecessor (that is the only
    // possible predecessor outside the SCoP).
    int idx = PHI->getBasicBlockIndex(SplitBB);
    if (idx < 0)
      continue;

    Value *ScalarValue = PHI->getIncomingValue(idx);
    ScalarValue =
        getNewScalarValue(ScalarValue, R, EmptyMap, GlobalMap, GlobalMap);

    // If the split block is the predecessor, initialize the PHI operand
    // alloca.
    Builder.CreateStore(ScalarValue, PHIOpMapping.getSecond());
  }
}

void BlockGenerator::createScalarFinalization(Region &R) {
  // The exit block of the __unoptimized__ region.
  BasicBlock *ExitBB = R.getExitingBlock();
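  // At this point the CFG around the region looks roughly as follows (a
  // sketch; the actual block names depend on the input):
  //
  //             EnteringBB
  //             /        \
  //     original scop   optimized scop
  //            |            |
  //         ExitBB      OptExitBB
  //             \        /
  //              MergeBB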
  // The merge block __just after__ the region and the optimized region.
  BasicBlock *MergeBB = R.getExit();

  // The exit block of the __optimized__ region.
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));

  Builder.SetInsertPoint(OptExitBB->getTerminator());
  for (const auto &EscapeMapping : EscapeMap) {
    // Extract the escaping instruction and the escaping users as well as the
    // alloca the instruction was demoted to.
    Instruction *EscapeInst = EscapeMapping.getFirst();
    const auto &EscapeMappingValue = EscapeMapping.getSecond();
    const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
    AllocaInst *ScalarAddr = EscapeMappingValue.first;

    // Reload the demoted instruction in the optimized version of the SCoP.
    Instruction *EscapeInstReload =
        Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload");

    // Create the merge PHI that merges the optimized and unoptimized version.
    PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
                                        EscapeInst->getName() + ".merge");
    MergePHI->insertBefore(MergeBB->getFirstInsertionPt());

    // Add the respective values to the merge PHI.
    MergePHI->addIncoming(EscapeInstReload, OptExitBB);
    MergePHI->addIncoming(EscapeInst, ExitBB);

    // Scalar evolution's information about the escaping instruction needs to
    // be revoked so the new merge PHI will be used instead.
    if (SE.isSCEVable(EscapeInst->getType()))
      SE.forgetValue(EscapeInst);

    // Replace all uses of the demoted instruction with the merge PHI.
    for (Instruction *EUser : EscapeUsers)
      EUser->replaceUsesOfWith(EscapeInst, MergePHI);
  }
}

void BlockGenerator::finalizeSCoP(Scop &S, ValueMapT &GlobalMap) {
  createScalarInitialization(S.getRegion(), GlobalMap);
  createScalarFinalization(S.getRegion());
}

VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
                                           VectorValueMapT &GlobalMaps,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           isl_map *Schedule)
    : BlockGenerator(BlockGen), GlobalMaps(GlobalMaps), VLTS(VLTS),
      Schedule(Schedule) {
  assert(GlobalMaps.size() > 1 && "Only one vector lane found");
  assert(Schedule && "No schedule provided");
}

Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, const Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], GlobalMaps[Lane],
                            VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}

Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps,
    bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
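  // A stride-one access touches VectorWidth consecutive elements, so a single
  // wide load suffices. For the negative (-1) stride case we load starting at
  // the last accessed element and reverse the resulting vector afterwards.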
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = nullptr;
  NewPointer = generateLocationAccessed(Stmt, Load, Pointer, ScalarMaps[Offset],
                                        GlobalMaps[Offset], VLTS[Offset]);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

Value *VectorBlockGenerator::generateStrideZeroLoad(ScopStmt &Stmt,
                                                    const LoadInst *Load,
                                                    ValueMapT &BBMap) {
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer = generateLocationAccessed(Stmt, Load, Pointer, BBMap,
                                               GlobalMaps[0], VLTS[0]);
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

Value *VectorBlockGenerator::generateUnknownStrideLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      dyn_cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = generateLocationAccessed(
        Stmt, Load, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(ScopStmt &Stmt, const LoadInst *Load,
                                        ValueMapT &VectorMap,
                                        VectorValueMapT &ScalarMaps) {
  if (!VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateScalarLoad(Stmt, Load, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    return;
  }

  const MemoryAccess &Access = Stmt.getAccessFor(Load);

  // Make sure we have scalar values available to access the pointer to
  // the data location.
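  // The pointer operand may itself have been vectorized, e.g., when the
  // address is computed by instructions inside the statement; in that case
  // each lane's scalar map receives the per-lane pointer value.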
  extractScalarValues(Load, VectorMap, ScalarMaps);

  Value *NewLoad;
  if (Access.isStrideZero(isl_map_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0]);
  else if (Access.isStrideOne(isl_map_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps);
  else if (Access.isStrideX(isl_map_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, true);
  else
    NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps);

  VectorMap[Load] = NewLoad;
}

void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt,
                                         const UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForInst(Inst));

  assert(isa<CastInst>(Inst) && "Cannot generate vector code for instruction");

  const CastInst *Cast = dyn_cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
}

void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt,
                                          const BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForInst(Inst);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(ScopStmt &Stmt, const StoreInst *Store,
                                     ValueMapT &VectorMap,
                                     VectorValueMapT &ScalarMaps) {
  const MemoryAccess &Access = Stmt.getAccessFor(Store);

  const Value *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForInst(Store));

  // Make sure we have scalar values available to access the pointer to
  // the data location.
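  // As with loads, the pointer may only exist as a vector value so far; the
  // extraction makes per-lane scalars available for the non-stride-one
  // fallback below, which addresses each element individually.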
  extractScalarValues(Store, VectorMap, ScalarMaps);

  if (Access.isStrideOne(isl_map_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth());
    Value *NewPointer = generateLocationAccessed(
        Stmt, Store, Pointer, ScalarMaps[0], GlobalMaps[0], VLTS[0]);

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    StoreInst *NewStore = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      NewStore->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
      Value *NewPointer = generateLocationAccessed(
          Stmt, Store, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Value *Operand : Inst->operands())
    if (VectorMap.count(Operand))
      return true;
  return false;
}

bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Value *Operand : Inst->operands()) {
    ValueMapT::iterator VecOp = VectorMap.find(Operand);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If one scalar element was extracted, all scalar elements should have
      // already been extracted by the code here, so there is no need to check
      // for the existence of all of them.
      if (SM.count(Operand))
        break;

      SM[Operand] =
          Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(ScopStmt &Stmt,
                                              const Instruction *Inst,
                                              ValueMapT &VectorMap,
                                              VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand;
  int VectorWidth = getVectorWidth();

  HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < VectorWidth; VectorLane++)
    BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
                                    GlobalMaps[VectorLane], VLTS[VectorLane]);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as a vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

int VectorBlockGenerator::getVectorWidth() { return GlobalMaps.size(); }

void VectorBlockGenerator::copyInstruction(ScopStmt &Stmt,
                                           const Instruction *Inst,
                                           ValueMapT &VectorMap,
                                           VectorValueMapT &ScalarMaps) {
  // Terminator instructions control the control flow. They are explicitly
  // expressed in the clast and do not need to be copied.
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion()))
    return;

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    generateLoad(Stmt, Load, VectorMap, ScalarMaps);
    return;
  }

  if (hasVectorOperands(Inst, VectorMap)) {
    if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
      copyStore(Stmt, Store, VectorMap, ScalarMaps);
      return;
    }

    if (const UnaryInstruction *Unary = dyn_cast<UnaryInstruction>(Inst)) {
      copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
      return;
    }

    if (const BinaryOperator *Binary = dyn_cast<BinaryOperator>(Inst)) {
      copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
      return;
    }

    // Fallthrough: we generate scalar instructions if we do not know how to
    // generate vector code.
  }

  copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps);
}

void VectorBlockGenerator::copyStmt(ScopStmt &Stmt) {
  assert(Stmt.isBlockStmt() && "TODO: Only block statements can be copied by "
                               "the vector block generator");

  BasicBlock *BB = Stmt.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported there is one map for scalar values
  // and one for vector values.
  //
  // In case we just do scalar code generation, the vectorMap is not used and
  // the scalarMap has just one dimension, which contains the mapping.
  //
  // In case vector code generation is done, an instruction may either appear
  // in the vector map once (as it is calculating <vectorwidth> values at a
  // time) or, if the values are calculated using scalar operations, it
  // appears once in every dimension of the scalarMap.
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap);
}

BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
                                             BasicBlock *BBCopy) {

  BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
  BasicBlock *BBCopyIDom = BlockMap.lookup(BBIDom);

  if (BBCopyIDom)
    DT.changeImmediateDominator(BBCopy, BBCopyIDom);

  return BBCopyIDom;
}

void RegionGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                               LoopToScevMapT &LTS) {
  assert(Stmt.isRegionStmt() &&
         "Only region statements can be copied by the region generator");

  // Forget all old mappings.
  BlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();

  // The region represented by the statement.
  Region *R = Stmt.getRegion();

  // Create a dedicated entry for the region where we can reload all demoted
  // inputs.
  BasicBlock *EntryBB = R->getEntry();
  BasicBlock *EntryBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
  Builder.SetInsertPoint(EntryBBCopy->begin());

  for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
    if (!R->contains(*PI))
      BlockMap[*PI] = EntryBBCopy;

  // Iterate over all blocks in the region in a breadth-first search.
  std::deque<BasicBlock *> Blocks;
  SmallPtrSet<BasicBlock *, 8> SeenBlocks;
  Blocks.push_back(EntryBB);
  SeenBlocks.insert(EntryBB);

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.front();
    Blocks.pop_front();

    // First split the block and update dominance information.
    BasicBlock *BBCopy = splitBB(BB);
    BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);

    // In order to remap PHI nodes we also store basic block mappings.
    BlockMap[BB] = BBCopy;

    // Get the mapping for this block and initialize it with the mapping
    // available at its immediate dominator (in the new region).
    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap = RegionMaps[BBCopyIDom];

    // Copy the block with the BlockGenerator.
    copyBB(Stmt, BB, BBCopy, RegionMap, GlobalMap, LTS);

    // Add values to incomplete PHI nodes waiting for this block to be copied.
    for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
      addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB,
                      GlobalMap, LTS);
    IncompletePHINodeMap[BB].clear();

    // And continue with new successors inside the region.
    for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
      if (R->contains(*SI) && SeenBlocks.insert(*SI).second)
        Blocks.push_back(*SI);
  }

  // Now create a new dedicated region exit block and add it to the region map.
  BasicBlock *ExitBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
  BlockMap[R->getExit()] = ExitBBCopy;

  repairDominance(R->getExit(), ExitBBCopy);

  // As the block generator doesn't handle control flow, we need to add the
  // region control flow by hand after all blocks have been copied.
  for (BasicBlock *BB : SeenBlocks) {

    BranchInst *BI = cast<BranchInst>(BB->getTerminator());

    BasicBlock *BBCopy = BlockMap[BB];
    Instruction *BICopy = BBCopy->getTerminator();

    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap.insert(BlockMap.begin(), BlockMap.end());

    Builder.SetInsertPoint(BBCopy);
    copyInstScalar(Stmt, BI, RegionMap, GlobalMap, LTS);
    BICopy->eraseFromParent();
  }

  // Add counting PHI nodes to all loops in the region that can be used as
  // replacement for SCEVs referring to the old loop.
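  // The generated loop structure is a plain counting PHI, roughly (a sketch;
  // the predecessor block names are hypothetical and depend on the copied
  // region):
  //
  //   %polly.subregion.iv     = phi i32 [ 0, %entering ], [ %inc, %latch ]
  //   %polly.subregion.iv.inc = add i32 %polly.subregion.iv, 1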
  for (BasicBlock *BB : SeenBlocks) {
    Loop *L = LI.getLoopFor(BB);
    if (L == nullptr || L->getHeader() != BB)
      continue;

    BasicBlock *BBCopy = BlockMap[BB];
    Value *NullVal = Builder.getInt32(0);
    PHINode *LoopPHI =
        PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
    Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
        LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
    LoopPHI->insertBefore(BBCopy->begin());
    LoopPHIInc->insertBefore(BBCopy->getTerminator());

    for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
      if (!R->contains(PredBB))
        continue;
      if (L->contains(PredBB))
        LoopPHI->addIncoming(LoopPHIInc, BlockMap[PredBB]);
      else
        LoopPHI->addIncoming(NullVal, BlockMap[PredBB]);
    }

    for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
      if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
        LoopPHI->addIncoming(NullVal, PredBBCopy);

    LTS[L] = SE.getUnknown(LoopPHI);
  }

  // Add all mappings from the region to the global map so outside uses will
  // use the copied instructions.
  for (auto &BBMap : RegionMaps)
    GlobalMap.insert(BBMap.second.begin(), BBMap.second.end());

  // Reset the old insert point for the builder.
  Builder.SetInsertPoint(ExitBBCopy->begin());
}

void RegionGenerator::generateScalarLoads(ScopStmt &Stmt,
                                          const Instruction *Inst,
                                          ValueMapT &BBMap) {

  // Inside a non-affine region, PHI nodes are copied, not demoted. Once the
  // PHI is copied it will reload all inputs from outside the region, hence
  // we do not need to generate code for the read accesses of the operands of
  // a PHI.
  if (isa<PHINode>(Inst))
    return;

  return BlockGenerator::generateScalarLoads(Stmt, Inst, BBMap);
}

void RegionGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  Region *StmtR = Stmt.getRegion();
  assert(StmtR && "Block statements need to use the generateScalarStores() "
                  "function in the BlockGenerator");

  BasicBlock *ExitBB = StmtR->getExit();

  // For region statements three kinds of scalar stores exist:
  // (1) A definition used by a non-phi instruction outside the region.
  // (2) A phi-instruction in the region entry.
  // (3) A write to a phi instruction in the region exit.
  // The last case is the tricky one since we do not know anymore which
  // predecessor of the exit needs to store the operand value that doesn't
  // have a definition in the region. Therefore, we have to check in each
  // block in the region if we should store the value or not.

  // Iterate over all accesses in the given statement.
  for (MemoryAccess *MA : Stmt) {

    // Skip non-scalar and read accesses.
    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *ScalarBase = cast<Instruction>(MA->getBaseAddr());
    Instruction *ScalarInst = MA->getAccessInstruction();
    PHINode *ScalarBasePHI = dyn_cast<PHINode>(ScalarBase);

    Value *ScalarValue = nullptr;
    AllocaInst *ScalarAddr = nullptr;

    if (!ScalarBasePHI) {
      // Case (1)
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      ScalarValue = ScalarInst;
    } else if (ScalarBasePHI->getParent() != ExitBB) {
      // Case (2)
      assert(ScalarBasePHI->getParent() == StmtR->getEntry() &&
             "Bad PHI self write in non-affine region");
      assert(ScalarBase == ScalarInst &&
             "Bad PHI self write in non-affine region");
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      ScalarValue = ScalarInst;
    } else {
      int PHIIdx = ScalarBasePHI->getBasicBlockIndex(BB);
      // Skip accesses we will not handle in this basic block but in another
      // one in the statement region.
      if (PHIIdx < 0)
        continue;

      // Case (3)
      ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
      ScalarValue = ScalarBasePHI->getIncomingValue(PHIIdx);
    }

    ScalarValue =
        getNewScalarValue(ScalarValue, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(ScalarValue, ScalarAddr);
  }
}

void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI,
                                      PHINode *PHICopy, BasicBlock *IncomingBB,
                                      ValueMapT &GlobalMap,
                                      LoopToScevMapT &LTS) {
  Region *StmtR = Stmt.getRegion();

  // If the incoming block was not yet copied, mark this PHI as incomplete.
  // Once the block is copied, the incoming value will be added.
  BasicBlock *BBCopy = BlockMap[IncomingBB];
  if (!BBCopy) {
    assert(StmtR->contains(IncomingBB) &&
           "Bad incoming block for PHI in non-affine region");
    IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
    return;
  }

  Value *OpCopy = nullptr;
  if (StmtR->contains(IncomingBB)) {
    assert(RegionMaps.count(BBCopy) &&
           "Incoming PHI block did not have a BBMap");
    ValueMapT &BBCopyMap = RegionMaps[BBCopy];

    Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
    OpCopy =
        getNewValue(Stmt, Op, BBCopyMap, GlobalMap, LTS, getLoopForInst(PHI));
  } else {

    if (PHICopy->getBasicBlockIndex(BBCopy) >= 0)
      return;

    AllocaInst *PHIOpAddr =
        getOrCreateAlloca(const_cast<PHINode *>(PHI), PHIOpMap, ".phiops");
    OpCopy = new LoadInst(PHIOpAddr, PHIOpAddr->getName() + ".reload",
                          BlockMap[IncomingBB]->getTerminator());
  }

  assert(OpCopy && "Incoming PHI value was not copied properly");
  assert(BBCopy && "Incoming PHI block was not copied properly");
  PHICopy->addIncoming(OpCopy, BBCopy);
}

Value *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, const PHINode *PHI,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  unsigned NumIncoming = PHI->getNumIncomingValues();
  PHINode *PHICopy =
      Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
  PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
  BBMap[PHI] = PHICopy;

  for (unsigned u = 0; u < NumIncoming; u++)
    addOperandToPHI(Stmt, PHI, PHICopy, PHI->getIncomingBlock(u), GlobalMap,
                    LTS);
  return PHICopy;
}