//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslExprBuilder.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/aff.h"
#include "isl/ast.h"
#include "isl/ast_build.h"
#include "isl/set.h"
#include <deque>

using namespace llvm;
using namespace polly;

static cl::opt<bool> Aligned("enable-polly-aligned",
                             cl::desc("Assume aligned memory accesses."),
                             cl::Hidden, cl::init(false), cl::ZeroOrMore,
                             cl::cat(PollyCategory));

bool polly::canSynthesize(const Value *V, const llvm::LoopInfo *LI,
                          ScalarEvolution *SE, const Region *R) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEV(const_cast<Value *>(V)))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, R))
        return true;

  return false;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return true;
    default:
      break;
    }
  }
  return false;
}

BlockGenerator::BlockGenerator(PollyIRBuilder &B, LoopInfo &LI,
                               ScalarEvolution &SE, DominatorTree &DT,
                               ScalarAllocaMapTy &ScalarMap,
                               ScalarAllocaMapTy &PHIOpMap,
                               EscapeUsersAllocaMapTy &EscapeMap,
                               IslExprBuilder *ExprBuilder)
    : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
      EntryBB(nullptr), PHIOpMap(PHIOpMap), ScalarMap(ScalarMap),
      EscapeMap(EscapeMap) {}

Value *BlockGenerator::getNewValue(ScopStmt &Stmt, const Value *Old,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS, Loop *L) const {
  // We assume constants never change.
  // This avoids map lookups for many calls to this function.
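  // The lookup order below is deliberate: the caller-provided GlobalMap takes
  // precedence over the block-local BBMap, and only if neither contains a
  // mapping do we try to synthesize the value from its SCEV at the current
  // loop scope.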
  if (isa<Constant>(Old))
    return const_cast<Value *>(Old);

  if (Value *New = GlobalMap.lookup(Old)) {
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  }

  if (Value *New = BBMap.lookup(Old))
    return New;

  if (SE.isSCEVable(Old->getType()))
    if (const SCEV *Scev = SE.getSCEVAtScope(const_cast<Value *>(Old), L)) {
      if (!isa<SCEVCouldNotCompute>(Scev)) {
        const SCEV *NewScev = apply(Scev, LTS, SE);
        ValueToValueMap VTV;
        VTV.insert(BBMap.begin(), BBMap.end());
        VTV.insert(GlobalMap.begin(), GlobalMap.end());
        NewScev = SCEVParameterRewriter::rewrite(NewScev, SE, VTV);
        SCEVExpander Expander(SE, Stmt.getParent()
                                      ->getRegion()
                                      .getEntry()
                                      ->getParent()
                                      ->getParent()
                                      ->getDataLayout(),
                              "polly");
        assert(Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() &&
               "Only instructions can be insert points for SCEVExpander");
        Value *Expanded = Expander.expandCodeFor(NewScev, Old->getType(),
                                                 Builder.GetInsertPoint());

        BBMap[Old] = Expanded;
        return Expanded;
      }
    }

  // A scop-constant value defined by a global or a function parameter.
  if (isa<GlobalValue>(Old) || isa<Argument>(Old))
    return const_cast<Value *>(Old);

  // A scop-constant value defined by an instruction executed outside the scop.
  if (const Instruction *Inst = dyn_cast<Instruction>(Old))
    if (!Stmt.getParent()->getRegion().contains(Inst->getParent()))
      return const_cast<Value *>(Old);

  // The scalar dependence is neither available nor SCEVCodegenable.
  llvm_unreachable("Unexpected scalar dependence in region!");
  return nullptr;
}

void BlockGenerator::copyInstScalar(ScopStmt &Stmt, const Instruction *Inst,
                                    ValueMapT &BBMap, ValueMapT &GlobalMap,
                                    LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. At the current state, they just crash the code
  // generation as the metadata operands are not correctly copied.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();

  // Replace the old operands with the new ones.
  for (Value *OldOperand : Inst->operands()) {
    Value *NewOperand = getNewValue(Stmt, OldOperand, BBMap, GlobalMap, LTS,
                                    getLoopForInst(Inst));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      delete NewInst;
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

Value *BlockGenerator::getNewAccessOperand(ScopStmt &Stmt,
                                           const MemoryAccess &MA) {
  isl_pw_multi_aff *PWAccRel;
  isl_union_map *Schedule;
  isl_ast_expr *Expr;
  isl_ast_build *Build = Stmt.getAstBuild();

  assert(ExprBuilder && Build &&
         "Cannot generate new value without IslExprBuilder!");

  Schedule = isl_ast_build_get_schedule(Build);
  PWAccRel = MA.applyScheduleToAccessRelation(Schedule);

  Expr = isl_ast_build_access_from_pw_multi_aff(Build, PWAccRel);
  Expr = isl_ast_expr_address_of(Expr);

  return ExprBuilder->create(Expr);
}

Value *BlockGenerator::generateLocationAccessed(
    ScopStmt &Stmt, const Instruction *Inst, const Value *Pointer,
    ValueMapT &BBMap, ValueMapT &GlobalMap, LoopToScevMapT &LTS) {
  const MemoryAccess &MA = Stmt.getAccessFor(Inst);

  Value *NewPointer;
  if (MA.hasNewAccessRelation())
    NewPointer = getNewAccessOperand(Stmt, MA);
  else
    NewPointer =
        getNewValue(Stmt, Pointer, BBMap, GlobalMap, LTS, getLoopForInst(Inst));

  return NewPointer;
}

Loop *BlockGenerator::getLoopForInst(const llvm::Instruction *Inst) {
  return LI.getLoopFor(Inst->getParent());
}

Value *BlockGenerator::generateScalarLoad(ScopStmt &Stmt, const LoadInst *Load,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap,
                                          LoopToScevMapT &LTS) {
  const Value *Pointer = Load->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, Pointer, BBMap, GlobalMap, LTS);
  Value *ScalarLoad = Builder.CreateAlignedLoad(
      NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_");
  return ScalarLoad;
}

void BlockGenerator::generateScalarStore(ScopStmt &Stmt, const StoreInst *Store,
                                         ValueMapT &BBMap, ValueMapT &GlobalMap,
                                         LoopToScevMapT &LTS) {
  const Value *Pointer = Store->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Store, Pointer, BBMap, GlobalMap, LTS);
  Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
                                    GlobalMap, LTS, getLoopForInst(Store));

  Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlignment());
}

void BlockGenerator::copyInstruction(ScopStmt &Stmt, const Instruction *Inst,
                                     ValueMapT &BBMap, ValueMapT &GlobalMap,
                                     LoopToScevMapT &LTS) {

  // First check for possible scalar dependences of this instruction.
  generateScalarLoads(Stmt, Inst, BBMap);
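
  // The dispatch below handles one case per instruction kind: terminators are
  // skipped, synthesizable values are recreated from their SCEV, loads and
  // stores are rewritten according to their memory access, PHIs are delegated
  // to copyPHIInstruction, ignored intrinsics are dropped, and everything
  // else is cloned operand by operand.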

  // Terminator instructions control the control flow. They are explicitly
  // expressed in the AST and do not need to be copied.
  if (Inst->isTerminator())
    return;

  Loop *L = getLoopForInst(Inst);
  if ((Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
      canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion())) {
    Value *NewValue = getNewValue(Stmt, Inst, BBMap, GlobalMap, LTS, L);
    BBMap[Inst] = NewValue;
    return;
  }

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateScalarLoad(Stmt, Load, BBMap, GlobalMap, LTS);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
    generateScalarStore(Stmt, Store, BBMap, GlobalMap, LTS);
    return;
  }

  if (const PHINode *PHI = dyn_cast<PHINode>(Inst)) {
    copyPHIInstruction(Stmt, PHI, BBMap, GlobalMap, LTS);
    return;
  }

  // Skip some special intrinsics for which we do not adjust the semantics to
  // the new schedule. All others are handled like every other instruction.
  // The set of skipped intrinsics is exactly the one tested by
  // isIgnoredIntrinsic() above, so reuse it instead of duplicating the switch.
  if (isIgnoredIntrinsic(Inst))
    return;

  copyInstScalar(Stmt, Inst, BBMap, GlobalMap, LTS);
}

void BlockGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                              LoopToScevMapT &LTS) {
  assert(Stmt.isBlockStmt() &&
         "Only block statements can be copied by the block generator");

  ValueMapT BBMap;

  BasicBlock *BB = Stmt.getBasicBlock();
  copyBB(Stmt, BB, BBMap, GlobalMap, LTS);
}

BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  return CopyBB;
}

BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS) {
  BasicBlock *CopyBB = splitBB(BB);
  copyBB(Stmt, BB, CopyBB, BBMap, GlobalMap, LTS);
  return CopyBB;
}

void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
                            ValueMapT &BBMap, ValueMapT &GlobalMap,
                            LoopToScevMapT &LTS) {
  Builder.SetInsertPoint(CopyBB->begin());
  EntryBB = &CopyBB->getParent()->getEntryBlock();

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, BBMap, GlobalMap, LTS);

  // After the basic block has been copied, store all scalars that escape this
  // block in their allocas: first the scalars that have dependences inside the
  // SCoP, then the ones that might escape the SCoP.
  generateScalarStores(Stmt, BB, BBMap, GlobalMap);

  const Region &R = Stmt.getParent()->getRegion();
  for (Instruction &Inst : *BB)
    handleOutsideUsers(R, &Inst, BBMap[&Inst]);
}

AllocaInst *BlockGenerator::getOrCreateAlloca(Instruction *ScalarBase,
                                              ScalarAllocaMapTy &Map,
                                              const char *NameExt,
                                              bool *IsNew) {

  // Check if an alloca was cached for the base instruction.
  AllocaInst *&Addr = Map[ScalarBase];

  // If requested, report whether the alloca already existed or will be
  // created.
  if (IsNew)
    *IsNew = (Addr == nullptr);

  // If no alloca was found create one and insert it in the entry block.
  if (!Addr) {
    auto *Ty = ScalarBase->getType();
    Addr = new AllocaInst(Ty, ScalarBase->getName() + NameExt);
    Addr->insertBefore(EntryBB->getFirstInsertionPt());
  }

  return Addr;
}

void BlockGenerator::handleOutsideUsers(const Region &R, Instruction *Inst,
                                        Value *InstCopy) {
  BasicBlock *ExitBB = R.getExit();

  EscapeUserVectorTy EscapeUsers;
  for (User *U : Inst->users()) {

    // Non-instruction users will never escape.
    Instruction *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;

    if (R.contains(UI) && ExitBB != UI->getParent())
      continue;

    EscapeUsers.push_back(UI);
  }

  // Exit if no escape uses were found.
  if (EscapeUsers.empty())
    return;

  // If there are escape users we get the alloca for this instruction and put
  // it in the EscapeMap for later finalization. However, if the alloca was not
  // created by an already handled scalar dependence we have to initialize it
  // too. Lastly, if the instruction was copied multiple times we already did
  // this and can exit.
  if (EscapeMap.count(Inst))
    return;

  // Get or create an escape alloca for this instruction.
  bool IsNew;
  AllocaInst *ScalarAddr =
      getOrCreateAlloca(Inst, ScalarMap, ".escape", &IsNew);

  // Remember that this instruction has escape uses and the escape alloca.
  EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));

  // If the escape alloca was just created, store the instruction in there;
  // otherwise that happened already.
  if (IsNew) {
    assert(InstCopy && "Except for PHIs every instruction should have a copy!");
    Builder.CreateStore(InstCopy, ScalarAddr);
  }
}

void BlockGenerator::generateScalarLoads(ScopStmt &Stmt,
                                         const Instruction *Inst,
                                         ValueMapT &BBMap) {
  auto *MAL = Stmt.lookupAccessesFor(Inst);

  if (!MAL)
    return;

  for (MemoryAccess &MA : *MAL) {
    AllocaInst *Address;
    if (!MA.isScalar() || !MA.isRead())
      continue;

    auto Base = cast<Instruction>(MA.getBaseAddr());

    if (MA.getScopArrayInfo()->isPHI())
      Address = getOrCreateAlloca(Base, PHIOpMap, ".phiops");
    else
      Address = getOrCreateAlloca(Base, ScalarMap, ".s2a");

    BBMap[Base] = Builder.CreateLoad(Address, Address->getName() + ".reload");
  }
}

Value *BlockGenerator::getNewScalarValue(Value *ScalarValue, const Region &R,
                                         ScalarAllocaMapTy &ReloadMap,
                                         ValueMapT &BBMap,
                                         ValueMapT &GlobalMap) {
  // If the value we want to store is an instruction we might have demoted it
  // in order to make it accessible here. In such a case a reload is
  // necessary. If it is not an instruction it will always be a value that
  // dominates the current point and we can just use it.
  // In total there are four options:
  //   (1) The value is not an instruction ==> use the value.
  //   (2) The value is an instruction that was split out of the region prior
  //       to code generation ==> use the mapped copy if one exists, otherwise
  //       the instruction itself, as it dominates the region.
  //   (3) The value is an instruction defined inside the region:
  //       (a) The value was defined in the current block, thus a copy is in
  //           the BBMap ==> use the mapped value.
  //       (b) The value was defined in a previous block, thus we demoted it
  //           earlier ==> use the reloaded value.
  Instruction *ScalarValueInst = dyn_cast<Instruction>(ScalarValue);
  if (!ScalarValueInst)
    return /* Case (1) */ ScalarValue;

  if (!R.contains(ScalarValueInst)) {
    if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst))
      return /* Case (2), mapped */ ScalarValueCopy;
    else
      return /* Case (2) */ ScalarValue;
  }

  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
    return /* Case (3a) */ ScalarValueCopy;

  // Case (3b)
  assert(ReloadMap.count(ScalarValueInst) &&
         "ScalarInst not mapped in the block and not in the given reload map!");
  Value *ReloadAddr = ReloadMap[ScalarValueInst];
  ScalarValue =
      Builder.CreateLoad(ReloadAddr, ReloadAddr->getName() + ".reload");

  return ScalarValue;
}

void BlockGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.isBlockStmt() && BB == Stmt.getBasicBlock() &&
         "Region statements need to use the generateScalarStores() "
         "function in the RegionGenerator");

  for (MemoryAccess *MA : Stmt) {
    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *Base = cast<Instruction>(MA->getBaseAddr());
    Instruction *Inst = MA->getAccessInstruction();

    Value *Val = nullptr;
    AllocaInst *Address = nullptr;

    if (MA->getScopArrayInfo()->isPHI()) {
      PHINode *BasePHI = cast<PHINode>(Base);
      int PHIIdx = BasePHI->getBasicBlockIndex(BB);
      assert(PHIIdx >= 0);
      Address = getOrCreateAlloca(Base, PHIOpMap, ".phiops");
      Val = BasePHI->getIncomingValue(PHIIdx);
    } else {
      Address = getOrCreateAlloca(Base, ScalarMap, ".s2a");
      Val = Inst;
    }
    Val = getNewScalarValue(Val, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(Val, Address);
  }
}

void BlockGenerator::createScalarInitialization(Region &R,
                                                ValueMapT &GlobalMap) {
  // The split block __just before__ the region and the optimized region.
  BasicBlock *SplitBB = R.getEnteringBlock();
  BranchInst *SplitBBTerm = cast<BranchInst>(SplitBB->getTerminator());
  assert(SplitBBTerm->getNumSuccessors() == 2 && "Bad region entering block!");

  // Get the start block of the __optimized__ region.
  BasicBlock *StartBB = SplitBBTerm->getSuccessor(0);
  if (StartBB == R.getEntry())
    StartBB = SplitBBTerm->getSuccessor(1);

  // For each PHI predecessor outside the region, store the incoming operand
  // value prior to entering the optimized region.
  Builder.SetInsertPoint(StartBB->getTerminator());

  ScalarAllocaMapTy EmptyMap;
  for (const auto &PHIOpMapping : PHIOpMap) {
    const PHINode *PHI = cast<PHINode>(PHIOpMapping.getFirst());

    // Check if this PHI has the split block as a predecessor (that is the
    // only possible predecessor outside the SCoP).
    int Idx = PHI->getBasicBlockIndex(SplitBB);
    if (Idx < 0)
      continue;

    Value *ScalarValue = PHI->getIncomingValue(Idx);
    ScalarValue =
        getNewScalarValue(ScalarValue, R, EmptyMap, GlobalMap, GlobalMap);

    // As the split block is the predecessor, initialize the PHI operand
    // alloca with the incoming value.
    Builder.CreateStore(ScalarValue, PHIOpMapping.getSecond());
  }
}

void BlockGenerator::createScalarFinalization(Region &R) {
  // The exit block of the __unoptimized__ region.
  BasicBlock *ExitBB = R.getExitingBlock();
  // The merge block __just after__ the region and the optimized region.
  BasicBlock *MergeBB = R.getExit();

  // The exit block of the __optimized__ region.
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));

  Builder.SetInsertPoint(OptExitBB->getTerminator());
  for (const auto &EscapeMapping : EscapeMap) {
    // Extract the escaping instruction and the escaping users as well as the
    // alloca the instruction was demoted to.
    Instruction *EscapeInst = EscapeMapping.getFirst();
    const auto &EscapeMappingValue = EscapeMapping.getSecond();
    const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
    AllocaInst *ScalarAddr = EscapeMappingValue.first;

    // Reload the demoted instruction in the optimized version of the SCoP.
    Instruction *EscapeInstReload =
        Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload");

    // Create the merge PHI that merges the optimized and unoptimized versions.
    PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
                                        EscapeInst->getName() + ".merge");
    MergePHI->insertBefore(MergeBB->getFirstInsertionPt());

    // Add the respective values to the merge PHI.
    MergePHI->addIncoming(EscapeInstReload, OptExitBB);
    MergePHI->addIncoming(EscapeInst, ExitBB);

    // Scalar evolution caches information about the escaping instruction.
    // This information has to be invalidated so the new merge PHI will be
    // used instead.
    if (SE.isSCEVable(EscapeInst->getType()))
      SE.forgetValue(EscapeInst);
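
    // The merged value now follows this CFG pattern (%x stands for any
    // escaping instruction):
    //
    //     ExitBB (original code)   OptExitBB (optimized code)
    //                    \            /
    //                     -- MergeBB --
    //       %x.merge = phi [ %x, %ExitBB ], [ %x.final_reload, %OptExitBB ]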

    // Replace all uses of the demoted instruction with the merge PHI.
    for (Instruction *EUser : EscapeUsers)
      EUser->replaceUsesOfWith(EscapeInst, MergePHI);
  }
}

void BlockGenerator::finalizeSCoP(Scop &S, ValueMapT &GlobalMap) {
  createScalarInitialization(S.getRegion(), GlobalMap);
  createScalarFinalization(S.getRegion());
}

VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
                                           VectorValueMapT &GlobalMaps,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           isl_map *Schedule)
    : BlockGenerator(BlockGen), GlobalMaps(GlobalMaps), VLTS(VLTS),
      Schedule(Schedule) {
  assert(GlobalMaps.size() > 1 && "Only one vector lane found");
  assert(Schedule && "No statement domain provided");
}

Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, const Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], GlobalMaps[Lane],
                            VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}

Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps,
    bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
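
  // For a negative stride the lanes run downwards in memory: lane 0 reads the
  // highest address and the last lane the lowest. We therefore compute the
  // address for the last lane, load a full vector upwards from there (e.g.,
  // A[i-3..i] for VectorWidth == 4), and reverse it below with the shuffle
  // mask <3, 2, 1, 0> to restore the original lane order.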
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = generateLocationAccessed(
      Stmt, Load, Pointer, ScalarMaps[Offset], GlobalMaps[Offset],
      VLTS[Offset]);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

Value *VectorBlockGenerator::generateStrideZeroLoad(ScopStmt &Stmt,
                                                    const LoadInst *Load,
                                                    ValueMapT &BBMap) {
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer = generateLocationAccessed(Stmt, Load, Pointer, BBMap,
                                               GlobalMaps[0], VLTS[0]);
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

  // An all-zero shuffle mask broadcasts the single loaded element into every
  // vector lane.
  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

Value *VectorBlockGenerator::generateUnknownStrideLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = generateLocationAccessed(
        Stmt, Load, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(ScopStmt &Stmt, const LoadInst *Load,
                                        ValueMapT &VectorMap,
                                        VectorValueMapT &ScalarMaps) {
  if (!VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateScalarLoad(Stmt, Load, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    return;
  }

  const MemoryAccess &Access = Stmt.getAccessFor(Load);
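
  // Based on the stride of the access under the current schedule we pick a
  // specialized code generation strategy below: stride zero becomes a splat
  // of a single scalar load, stride one a full-width vector load, stride
  // minus one a reversed vector load, and any other stride a lane-by-lane
  // gather of scalar loads.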

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Load, VectorMap, ScalarMaps);

  Value *NewLoad;
  if (Access.isStrideZero(isl_map_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0]);
  else if (Access.isStrideOne(isl_map_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps);
  else if (Access.isStrideX(isl_map_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, true);
  else
    NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps);

  VectorMap[Load] = NewLoad;
}

void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt,
                                         const UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForInst(Inst));

  assert(isa<CastInst>(Inst) && "Cannot generate vector code for instruction");

  const CastInst *Cast = cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
}

void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt,
                                          const BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForInst(Inst);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(ScopStmt &Stmt, const StoreInst *Store,
                                     ValueMapT &VectorMap,
                                     VectorValueMapT &ScalarMaps) {
  const MemoryAccess &Access = Stmt.getAccessFor(Store);

  const Value *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForInst(Store));
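
  // Stores are handled analogously to loads: a stride-one access is written
  // with a single wide store through a vector pointer, while any other stride
  // falls back to extracting every lane and issuing one scalar store per lane.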

  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Store, VectorMap, ScalarMaps);

  if (Access.isStrideOne(isl_map_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth());
    Value *NewPointer = generateLocationAccessed(
        Stmt, Store, Pointer, ScalarMaps[0], GlobalMaps[0], VLTS[0]);

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    StoreInst *NewStore = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      NewStore->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
      Value *NewPointer = generateLocationAccessed(
          Stmt, Store, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Value *Operand : Inst->operands())
    if (VectorMap.count(Operand))
      return true;
  return false;
}

bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Value *Operand : Inst->operands()) {
    ValueMapT::iterator VecOp = VectorMap.find(Operand);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If one scalar element has been extracted, all of them have already
      // been extracted by the code below, so there is no need to check for
      // the existence of each of them.
      if (SM.count(Operand))
        break;

      SM[Operand] =
          Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(ScopStmt &Stmt,
                                              const Instruction *Inst,
                                              ValueMapT &VectorMap,
                                              VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand;
  int VectorWidth = getVectorWidth();

  HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < getVectorWidth(); VectorLane++)
    BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
                                    GlobalMaps[VectorLane], VLTS[VectorLane]);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as a vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

int VectorBlockGenerator::getVectorWidth() { return GlobalMaps.size(); }

void VectorBlockGenerator::copyInstruction(ScopStmt &Stmt,
                                           const Instruction *Inst,
                                           ValueMapT &VectorMap,
                                           VectorValueMapT &ScalarMaps) {
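  // Vector code generation dispatch: terminators and synthesizable values are
  // skipped, loads get stride-specific handling, casts and binary operators
  // with vector operands are vectorized directly, and every other instruction
  // is scalarized lane by lane.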

  // Terminator instructions control the control flow. They are explicitly
  // expressed in the AST and do not need to be copied.
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion()))
    return;

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    generateLoad(Stmt, Load, VectorMap, ScalarMaps);
    return;
  }

  if (hasVectorOperands(Inst, VectorMap)) {
    if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
      copyStore(Stmt, Store, VectorMap, ScalarMaps);
      return;
    }

    if (const UnaryInstruction *Unary = dyn_cast<UnaryInstruction>(Inst)) {
      copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
      return;
    }

    if (const BinaryOperator *Binary = dyn_cast<BinaryOperator>(Inst)) {
      copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
      return;
    }

    // Fallthrough: we generate scalar instructions if we do not know how to
    // generate vector code.
  }

  copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps);
}

void VectorBlockGenerator::copyStmt(ScopStmt &Stmt) {
  assert(Stmt.isBlockStmt() && "TODO: Only block statements can be copied by "
                               "the vector block generator");

  BasicBlock *BB = Stmt.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported there is one map for scalar values
  // and one for vector values.
  //
  // In case we just do scalar code generation, the vectorMap is not used and
  // the scalarMap has just one dimension, which contains the mapping.
  //
  // In case vector code generation is done, an instruction may either appear
  // once in the vector map (if it computes <vectorwidth> values at a time) or,
  // if the values are calculated using scalar operations, once in every
  // dimension of the scalarMap.
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap);
}

BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
                                             BasicBlock *BBCopy) {

  BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
  BasicBlock *BBCopyIDom = BlockMap.lookup(BBIDom);

  if (BBCopyIDom)
    DT.changeImmediateDominator(BBCopy, BBCopyIDom);

  return BBCopyIDom;
}

void RegionGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                               LoopToScevMapT &LTS) {
  assert(Stmt.isRegionStmt() &&
         "Only region statements can be copied by the region generator");

  // Forget all old mappings.
  BlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();

  // The region represented by the statement.
  Region *R = Stmt.getRegion();

  // Create a dedicated entry for the region where we can reload all demoted
  // inputs.
  BasicBlock *EntryBB = R->getEntry();
  BasicBlock *EntryBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
  Builder.SetInsertPoint(EntryBBCopy->begin());

  for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
    if (!R->contains(*PI))
      BlockMap[*PI] = EntryBBCopy;

  // Iterate over all blocks in the region in a breadth-first search.
  std::deque<BasicBlock *> Blocks;
  SmallPtrSet<BasicBlock *, 8> SeenBlocks;
  Blocks.push_back(EntryBB);
  SeenBlocks.insert(EntryBB);

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.front();
    Blocks.pop_front();

    // First split the block and update dominance information.
    BasicBlock *BBCopy = splitBB(BB);
    BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);

    // In order to remap PHI nodes we also store basic block mappings.
    BlockMap[BB] = BBCopy;

    // Get the mapping for this block and initialize it with the mapping
    // available at its immediate dominator (in the new region). Copy the
    // dominator's map before taking a reference into RegionMaps, as the
    // subscript operator may create an entry and invalidate existing
    // references.
    ValueMapT BBCopyIDomMap = RegionMaps[BBCopyIDom];
    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap = std::move(BBCopyIDomMap);

    // Copy the block with the BlockGenerator.
    copyBB(Stmt, BB, BBCopy, RegionMap, GlobalMap, LTS);

    // Add values to incomplete PHI nodes that were waiting for this block to
    // be copied.
    for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
      addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB,
                      GlobalMap, LTS);
    IncompletePHINodeMap[BB].clear();

    // And continue with the new successors inside the region.
    for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
      if (R->contains(*SI) && SeenBlocks.insert(*SI).second)
        Blocks.push_back(*SI);
  }

  // Now create a new dedicated region exit block and add it to the region map.
  BasicBlock *ExitBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
  BlockMap[R->getExit()] = ExitBBCopy;

  repairDominance(R->getExit(), ExitBBCopy);

  // As the block generator doesn't handle control flow we need to add the
  // region control flow by hand after all blocks have been copied.
  for (BasicBlock *BB : SeenBlocks) {

    BranchInst *BI = cast<BranchInst>(BB->getTerminator());

    BasicBlock *BBCopy = BlockMap[BB];
    Instruction *BICopy = BBCopy->getTerminator();

    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap.insert(BlockMap.begin(), BlockMap.end());

    Builder.SetInsertPoint(BICopy);
    copyInstScalar(Stmt, BI, RegionMap, GlobalMap, LTS);
    BICopy->eraseFromParent();
  }

  // Add counting PHI nodes to all loops in the region that can be used as
  // replacement for SCEVs referring to the old loop.
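  // Each such PHI is a plain iteration counter for the copied loop: it starts
  // at 0 on every edge entering the loop and is incremented by 1 on every back
  // edge, so LTS can map the original loop to this new induction variable when
  // expanding SCEVs.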
  for (BasicBlock *BB : SeenBlocks) {
    Loop *L = LI.getLoopFor(BB);
    if (L == nullptr || L->getHeader() != BB)
      continue;

    BasicBlock *BBCopy = BlockMap[BB];
    Value *NullVal = Builder.getInt32(0);
    PHINode *LoopPHI =
        PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
    Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
        LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
    LoopPHI->insertBefore(BBCopy->begin());
    LoopPHIInc->insertBefore(BBCopy->getTerminator());

    for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
      if (!R->contains(PredBB))
        continue;
      if (L->contains(PredBB))
        LoopPHI->addIncoming(LoopPHIInc, BlockMap[PredBB]);
      else
        LoopPHI->addIncoming(NullVal, BlockMap[PredBB]);
    }

    for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
      if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
        LoopPHI->addIncoming(NullVal, PredBBCopy);

    LTS[L] = SE.getUnknown(LoopPHI);
  }

  // Add all mappings from the region to the global map so outside uses will
  // use the copied instructions.
  for (auto &BBMap : RegionMaps)
    GlobalMap.insert(BBMap.second.begin(), BBMap.second.end());

  // Reset the old insert point for the build.
  Builder.SetInsertPoint(ExitBBCopy->begin());
}

void RegionGenerator::generateScalarLoads(ScopStmt &Stmt,
                                          const Instruction *Inst,
                                          ValueMapT &BBMap) {

  // Inside a non-affine region PHI nodes are copied, not demoted. Once the
  // PHI is copied it will reload all inputs from outside the region, hence
  // we do not need to generate code for the read accesses of the operands of
  // a PHI.
  if (isa<PHINode>(Inst))
    return;

  return BlockGenerator::generateScalarLoads(Stmt, Inst, BBMap);
}

void RegionGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  Region *StmtR = Stmt.getRegion();
  assert(StmtR && "Block statements need to use the generateScalarStores() "
                  "function in the BlockGenerator");

  for (MemoryAccess *MA : Stmt) {

    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *ScalarBase = cast<Instruction>(MA->getBaseAddr());
    Instruction *ScalarInst = MA->getAccessInstruction();
    PHINode *ScalarBasePHI = dyn_cast<PHINode>(ScalarBase);

    // Only generate accesses that belong to this basic block.
    if (ScalarInst->getParent() != BB)
      continue;

    Value *Val = nullptr;
    AllocaInst *ScalarAddr = nullptr;

    if (MA->getScopArrayInfo()->isPHI()) {
      int PHIIdx = ScalarBasePHI->getBasicBlockIndex(BB);
      ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
      Val = ScalarBasePHI->getIncomingValue(PHIIdx);
    } else {
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      Val = ScalarInst;
    }

    Val = getNewScalarValue(Val, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(Val, ScalarAddr);
  }
}

void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI,
                                      PHINode *PHICopy, BasicBlock *IncomingBB,
                                      ValueMapT &GlobalMap,
                                      LoopToScevMapT &LTS) {
  Region *StmtR = Stmt.getRegion();

  // If the incoming block was not yet copied, mark this PHI as incomplete.
  // Once the block is copied the incoming value will be added.
  BasicBlock *BBCopy = BlockMap[IncomingBB];
  if (!BBCopy) {
    assert(StmtR->contains(IncomingBB) &&
           "Bad incoming block for PHI in non-affine region");
    IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
    return;
  }

  Value *OpCopy = nullptr;
  if (StmtR->contains(IncomingBB)) {
    assert(RegionMaps.count(BBCopy) &&
           "Incoming PHI block did not have a BBMap");
    ValueMapT &BBCopyMap = RegionMaps[BBCopy];

    Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
    OpCopy =
        getNewValue(Stmt, Op, BBCopyMap, GlobalMap, LTS, getLoopForInst(PHI));
  } else {

    if (PHICopy->getBasicBlockIndex(BBCopy) >= 0)
      return;

    AllocaInst *PHIOpAddr =
        getOrCreateAlloca(const_cast<PHINode *>(PHI), PHIOpMap, ".phiops");
    OpCopy = new LoadInst(PHIOpAddr, PHIOpAddr->getName() + ".reload",
                          BlockMap[IncomingBB]->getTerminator());
  }

  assert(OpCopy && "Incoming PHI value was not copied properly");
  assert(BBCopy && "Incoming PHI block was not copied properly");
  PHICopy->addIncoming(OpCopy, BBCopy);
}

Value *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, const PHINode *PHI,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  unsigned NumIncoming = PHI->getNumIncomingValues();
  PHINode *PHICopy =
      Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
  PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
  BBMap[PHI] = PHICopy;

  for (unsigned u = 0; u < NumIncoming; u++)
    addOperandToPHI(Stmt, PHI, PHICopy, PHI->getIncomingBlock(u), GlobalMap,
                    LTS);
  return PHICopy;
}