//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslExprBuilder.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/aff.h"
#include "isl/ast.h"
#include "isl/ast_build.h"
#include "isl/set.h"
#include <deque>

using namespace llvm;
using namespace polly;

static cl::opt<bool> Aligned("enable-polly-aligned",
                             cl::desc("Assume aligned memory accesses."),
                             cl::Hidden, cl::init(false), cl::ZeroOrMore,
                             cl::cat(PollyCategory));

bool polly::canSynthesize(const Instruction *I, const llvm::LoopInfo *LI,
                          ScalarEvolution *SE, const Region *R) {
  if (!I || !SE->isSCEVable(I->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEV(const_cast<Instruction *>(I)))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, R))
        return true;

  return false;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return true;
    default:
      break;
    }
  }
  return false;
}

BlockGenerator::BlockGenerator(PollyIRBuilder &B, LoopInfo &LI,
                               ScalarEvolution &SE, DominatorTree &DT,
                               ScalarAllocaMapTy &ScalarMap,
                               ScalarAllocaMapTy &PHIOpMap,
                               EscapeUsersAllocaMapTy &EscapeMap,
                               IslExprBuilder *ExprBuilder)
    : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
      EntryBB(nullptr), PHIOpMap(PHIOpMap), ScalarMap(ScalarMap),
      EscapeMap(EscapeMap) {}

Value *BlockGenerator::getNewValue(ScopStmt &Stmt, const Value *Old,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS, Loop *L) const {
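  // The lookups below are tried in a fixed order: constants are used
  // unchanged, then the scop-global GlobalMap, then the block-local BBMap,
  // then SCEV-based recomputation, and finally scop-constant globals,
  // arguments, and instructions defined outside the scop.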
  // We assume constants never change.
  // This avoids map lookups for many calls to this function.
  if (isa<Constant>(Old))
    return const_cast<Value *>(Old);

  if (Value *New = GlobalMap.lookup(Old)) {
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  }

  if (Value *New = BBMap.lookup(Old))
    return New;

  if (SE.isSCEVable(Old->getType()))
    if (const SCEV *Scev = SE.getSCEVAtScope(const_cast<Value *>(Old), L)) {
      if (!isa<SCEVCouldNotCompute>(Scev)) {
        const SCEV *NewScev = apply(Scev, LTS, SE);
        ValueToValueMap VTV;
        VTV.insert(BBMap.begin(), BBMap.end());
        VTV.insert(GlobalMap.begin(), GlobalMap.end());
        NewScev = SCEVParameterRewriter::rewrite(NewScev, SE, VTV);
        SCEVExpander Expander(SE, Stmt.getParent()
                                      ->getRegion()
                                      .getEntry()
                                      ->getParent()
                                      ->getParent()
                                      ->getDataLayout(),
                              "polly");
        Value *Expanded = Expander.expandCodeFor(NewScev, Old->getType(),
                                                 Builder.GetInsertPoint());

        BBMap[Old] = Expanded;
        return Expanded;
      }
    }

  // A scop-constant value defined by a global or a function parameter.
  if (isa<GlobalValue>(Old) || isa<Argument>(Old))
    return const_cast<Value *>(Old);

  // A scop-constant value defined by an instruction executed outside the scop.
  if (const Instruction *Inst = dyn_cast<Instruction>(Old))
    if (!Stmt.getParent()->getRegion().contains(Inst->getParent()))
      return const_cast<Value *>(Old);

  // The scalar dependence is neither available nor SCEVCodegenable.
  llvm_unreachable("Unexpected scalar dependence in region!");
  return nullptr;
}

void BlockGenerator::copyInstScalar(ScopStmt &Stmt, const Instruction *Inst,
                                    ValueMapT &BBMap, ValueMapT &GlobalMap,
                                    LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. At the current state, they just crash the code
  // generation as the meta-data operands are not correctly copied.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();
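  // For illustration: an original instruction such as
  //   %sum = fadd double %a, %b
  // is cloned, its operands are remapped below, and the copy is renamed, so
  // the generated statement contains roughly
  //   %p_sum = fadd double %p_a, %p_b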
  // Replace old operands with the new ones.
  for (Value *OldOperand : Inst->operands()) {
    Value *NewOperand = getNewValue(Stmt, OldOperand, BBMap, GlobalMap, LTS,
                                    getLoopForInst(Inst));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      delete NewInst;
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

Value *BlockGenerator::getNewAccessOperand(ScopStmt &Stmt,
                                           const MemoryAccess &MA) {
  isl_pw_multi_aff *PWAccRel;
  isl_union_map *Schedule;
  isl_ast_expr *Expr;
  isl_ast_build *Build = Stmt.getAstBuild();

  assert(ExprBuilder && Build &&
         "Cannot generate new value without IslExprBuilder!");

  Schedule = isl_ast_build_get_schedule(Build);
  PWAccRel = MA.applyScheduleToAccessRelation(Schedule);

  Expr = isl_ast_build_access_from_pw_multi_aff(Build, PWAccRel);
  Expr = isl_ast_expr_address_of(Expr);

  return ExprBuilder->create(Expr);
}

Value *BlockGenerator::generateLocationAccessed(
    ScopStmt &Stmt, const Instruction *Inst, const Value *Pointer,
    ValueMapT &BBMap, ValueMapT &GlobalMap, LoopToScevMapT &LTS) {
  const MemoryAccess &MA = Stmt.getAccessFor(Inst);

  Value *NewPointer;
  if (MA.hasNewAccessRelation())
    NewPointer = getNewAccessOperand(Stmt, MA);
  else
    NewPointer =
        getNewValue(Stmt, Pointer, BBMap, GlobalMap, LTS, getLoopForInst(Inst));

  return NewPointer;
}

Loop *BlockGenerator::getLoopForInst(const llvm::Instruction *Inst) {
  return LI.getLoopFor(Inst->getParent());
}

Value *BlockGenerator::generateScalarLoad(ScopStmt &Stmt, const LoadInst *Load,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap,
                                          LoopToScevMapT &LTS) {
  const Value *Pointer = Load->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, Pointer, BBMap, GlobalMap, LTS);
  Value *ScalarLoad = Builder.CreateAlignedLoad(
      NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_");
  return ScalarLoad;
}

Value *BlockGenerator::generateScalarStore(ScopStmt &Stmt,
                                           const StoreInst *Store,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  const Value *Pointer = Store->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Store, Pointer, BBMap, GlobalMap, LTS);
  Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
                                    GlobalMap, LTS, getLoopForInst(Store));

  Value *NewStore = Builder.CreateAlignedStore(ValueOperand, NewPointer,
                                               Store->getAlignment());
  return NewStore;
}

void BlockGenerator::copyInstruction(ScopStmt &Stmt, const Instruction *Inst,
                                     ValueMapT &BBMap, ValueMapT &GlobalMap,
                                     LoopToScevMapT &LTS) {

  // First check for possible scalar dependences for this instruction.
  generateScalarLoads(Stmt, Inst, BBMap);
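  // The checks below dispatch, in order: terminators (dropped), values that
  // can be recomputed from SCEV, loads, stores, PHI nodes, and intrinsics we
  // intentionally ignore; everything else is copied as a scalar instruction.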
  // Terminator instructions control the control flow. They are explicitly
  // expressed in the isl AST and do not need to be copied.
  if (Inst->isTerminator())
    return;

  Loop *L = getLoopForInst(Inst);
  if ((Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
      canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion())) {
    Value *NewValue = getNewValue(Stmt, Inst, BBMap, GlobalMap, LTS, L);
    BBMap[Inst] = NewValue;
    return;
  }

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateScalarLoad(Stmt, Load, BBMap, GlobalMap, LTS);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
    Value *NewStore = generateScalarStore(Stmt, Store, BBMap, GlobalMap, LTS);
    // Compute NewStore before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Store] = NewStore;
    return;
  }

  if (const PHINode *PHI = dyn_cast<PHINode>(Inst)) {
    copyPHIInstruction(Stmt, PHI, BBMap, GlobalMap, LTS);
    return;
  }

  // Skip some special intrinsics for which we do not adjust the semantics to
  // the new schedule (see isIgnoredIntrinsic above). All others are handled
  // like every other instruction.
  if (isIgnoredIntrinsic(Inst))
    return;

  copyInstScalar(Stmt, Inst, BBMap, GlobalMap, LTS);
}

void BlockGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                              LoopToScevMapT &LTS) {
  assert(Stmt.isBlockStmt() &&
         "Only block statements can be copied by the block generator");

  ValueMapT BBMap;

  BasicBlock *BB = Stmt.getBasicBlock();
  copyBB(Stmt, BB, BBMap, GlobalMap, LTS);
}

BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  return CopyBB;
}

BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS) {
  BasicBlock *CopyBB = splitBB(BB);
  copyBB(Stmt, BB, CopyBB, BBMap, GlobalMap, LTS);
  return CopyBB;
}

void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
                            ValueMapT &BBMap, ValueMapT &GlobalMap,
                            LoopToScevMapT &LTS) {
  Builder.SetInsertPoint(CopyBB->begin());
  EntryBB = &CopyBB->getParent()->getEntryBlock();

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, BBMap, GlobalMap, LTS);
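  // For illustration: a scalar %v defined in this block but used in a later
  // statement has been demoted to a "%v.s2a" stack slot; the stores emitted
  // below write the copied value into that slot so consuming statements can
  // reload it. PHI operands use separate ".phiops" slots.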
  // After a basic block was copied store all scalars that escape this block
  // in their alloca. First the scalars that have dependences inside the SCoP,
  // then the ones that might escape the SCoP.
  generateScalarStores(Stmt, BB, BBMap, GlobalMap);

  const Region &R = Stmt.getParent()->getRegion();
  for (Instruction &Inst : *BB)
    handleOutsideUsers(R, &Inst, BBMap[&Inst]);
}

AllocaInst *BlockGenerator::getOrCreateAlloca(Instruction *ScalarBase,
                                              ScalarAllocaMapTy &Map,
                                              const char *NameExt,
                                              bool *IsNew) {

  // Check if an alloca was cached for the base instruction.
  AllocaInst *&Addr = Map[ScalarBase];

  // If requested, report whether the alloca was found or will be created.
  if (IsNew)
    *IsNew = (Addr == nullptr);

  // If no alloca was found, create one and insert it in the entry block.
  if (!Addr) {
    auto *Ty = ScalarBase->getType();
    Addr = new AllocaInst(Ty, ScalarBase->getName() + NameExt);
    Addr->insertBefore(EntryBB->getFirstInsertionPt());
  }

  return Addr;
}

void BlockGenerator::handleOutsideUsers(const Region &R, Instruction *Inst,
                                        Value *InstCopy) {
  BasicBlock *ExitBB = R.getExit();

  EscapeUserVectorTy EscapeUsers;
  for (User *U : Inst->users()) {

    // Non-instruction users never escape.
    Instruction *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;

    if (R.contains(UI) && ExitBB != UI->getParent())
      continue;

    EscapeUsers.push_back(UI);
  }

  // Exit if no escape uses were found.
  if (EscapeUsers.empty())
    return;

  // If there are escape users we get the alloca for this instruction and put
  // it in the EscapeMap for later finalization. However, if the alloca was
  // not created by an already handled scalar dependence we also have to
  // initialize it. Lastly, if the instruction was copied multiple times we
  // already did this and can exit early.
  if (EscapeMap.count(Inst))
    return;

  // Get or create an escape alloca for this instruction.
  bool IsNew;
  AllocaInst *ScalarAddr =
      getOrCreateAlloca(Inst, ScalarMap, ".escape", &IsNew);

  // Remember that this instruction has escape uses and the escape alloca.
  EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));

  // If the escape alloca was just created, store the instruction into it;
  // otherwise that has already happened.
  if (IsNew) {
    assert(InstCopy && "Except for PHIs every instruction should have a copy");
    Builder.CreateStore(InstCopy, ScalarAddr);
  }
}

void BlockGenerator::generateScalarLoads(ScopStmt &Stmt,
                                         const Instruction *Inst,
                                         ValueMapT &BBMap) {
  auto *MAL = Stmt.lookupAccessesFor(Inst);

  if (!MAL)
    return;

  for (MemoryAccess &MA : *MAL) {
    AllocaInst *Address;
    if (!MA.isScalar() || !MA.isRead())
      continue;

    auto Base = cast<Instruction>(MA.getBaseAddr());

    if (MA.getScopArrayInfo()->isPHI())
      Address = getOrCreateAlloca(Base, PHIOpMap, ".phiops");
    else
      Address = getOrCreateAlloca(Base, ScalarMap, ".s2a");

    BBMap[Base] = Builder.CreateLoad(Address, Address->getName() + ".reload");
  }
}

Value *BlockGenerator::getNewScalarValue(Value *ScalarValue, const Region &R,
                                         ScalarAllocaMapTy &ReloadMap,
                                         ValueMapT &BBMap,
                                         ValueMapT &GlobalMap) {
  // If the value we want to store is an instruction, we might have demoted it
  // in order to make it accessible here. In such a case a reload is
  // necessary. If it is not an instruction, it will always be a value that
  // dominates the current point and we can just use it.
  // In total there are 4 options:
  // (1) The value is not an instruction ==> use the value.
  // (2) The value is an instruction that was split out of the region prior to
  //     code generation ==> use the instruction as it dominates the region.
  // (3) The value is an instruction:
  //     (a) The value was defined in the current block, thus a copy is in
  //         the BBMap ==> use the mapped value.
  //     (b) The value was defined in a previous block, thus we demoted it
  //         earlier ==> use the reloaded value.
  Instruction *ScalarValueInst = dyn_cast<Instruction>(ScalarValue);
  if (!ScalarValueInst)
    return /* Case (1) */ ScalarValue;

  if (!R.contains(ScalarValueInst)) {
    // Case (2): prefer a remapped copy from the GlobalMap if one exists,
    // otherwise use the instruction itself as it dominates the region.
    if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst))
      return ScalarValueCopy;
    return ScalarValue;
  }

  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
    return /* Case (3a) */ ScalarValueCopy;

  // Case (3b)
  assert(ReloadMap.count(ScalarValueInst) &&
         "ScalarInst not mapped in the block and not in the given reload map!");
  Value *ReloadAddr = ReloadMap[ScalarValueInst];
  ScalarValue =
      Builder.CreateLoad(ReloadAddr, ReloadAddr->getName() + ".reload");

  return ScalarValue;
}

void BlockGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.isBlockStmt() && BB == Stmt.getBasicBlock() &&
         "Region statements need to use the generateScalarStores() "
         "function in the RegionGenerator");

  for (MemoryAccess *MA : Stmt) {
    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *Base = cast<Instruction>(MA->getBaseAddr());
    Instruction *Inst = MA->getAccessInstruction();

    Value *Val = nullptr;
    AllocaInst *Address = nullptr;

    if (MA->getScopArrayInfo()->isPHI()) {
      PHINode *BasePHI = cast<PHINode>(Base);
      int PHIIdx = BasePHI->getBasicBlockIndex(BB);
      Address = getOrCreateAlloca(Base, PHIOpMap, ".phiops");
      Val = BasePHI->getIncomingValue(PHIIdx);
    } else {
      Address = getOrCreateAlloca(Base, ScalarMap, ".s2a");
      Val = Inst;
    }
    Val = getNewScalarValue(Val, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(Val, Address);
  }
}

void BlockGenerator::createScalarInitialization(Region &R,
                                                ValueMapT &GlobalMap) {
  // The split block __just before__ the region and optimized region.
  BasicBlock *SplitBB = R.getEnteringBlock();
  BranchInst *SplitBBTerm = cast<BranchInst>(SplitBB->getTerminator());
  assert(SplitBBTerm->getNumSuccessors() == 2 && "Bad region entering block!");

  // Get the start block of the __optimized__ region.
  BasicBlock *StartBB = SplitBBTerm->getSuccessor(0);
  if (StartBB == R.getEntry())
    StartBB = SplitBBTerm->getSuccessor(1);

  // For each PHI predecessor outside the region, store the incoming operand
  // value prior to entering the optimized region.
  Builder.SetInsertPoint(StartBB->getTerminator());

  ScalarAllocaMapTy EmptyMap;
  for (const auto &PHIOpMapping : PHIOpMap) {
    const PHINode *PHI = cast<PHINode>(PHIOpMapping.getFirst());
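    // For illustration: given a header PHI in the original region such as
    //   %phi = phi double [ 0.0, %split ], [ %next, %latch ]
    // the store emitted below writes 0.0 into the "%phi.phiops" alloca, so
    // the copied region starts with the correct initial value.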
    // Check if this PHI has the split block as predecessor (that is the only
    // possible predecessor outside the SCoP).
    int Idx = PHI->getBasicBlockIndex(SplitBB);
    if (Idx < 0)
      continue;

    Value *ScalarValue = PHI->getIncomingValue(Idx);
    ScalarValue =
        getNewScalarValue(ScalarValue, R, EmptyMap, GlobalMap, GlobalMap);

    // If the split block is the predecessor, initialize the PHI operand
    // alloca.
    Builder.CreateStore(ScalarValue, PHIOpMapping.getSecond());
  }
}

void BlockGenerator::createScalarFinalization(Region &R) {
  // The exit block of the __unoptimized__ region.
  BasicBlock *ExitBB = R.getExitingBlock();
  // The merge block __just after__ the region and the optimized region.
  BasicBlock *MergeBB = R.getExit();

  // The exit block of the __optimized__ region.
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));

  Builder.SetInsertPoint(OptExitBB->getTerminator());
  for (const auto &EscapeMapping : EscapeMap) {
    // Extract the escaping instruction and the escaping users as well as the
    // alloca the instruction was demoted to.
    Instruction *EscapeInst = EscapeMapping.getFirst();
    const auto &EscapeMappingValue = EscapeMapping.getSecond();
    const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
    AllocaInst *ScalarAddr = EscapeMappingValue.first;

    // Reload the demoted instruction in the optimized version of the SCoP.
    Instruction *EscapeInstReload =
        Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload");

    // Create the merge PHI that merges the optimized and unoptimized versions.
    PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
                                        EscapeInst->getName() + ".merge");
    MergePHI->insertBefore(MergeBB->getFirstInsertionPt());

    // Add the respective values to the merge PHI.
    MergePHI->addIncoming(EscapeInstReload, OptExitBB);
    MergePHI->addIncoming(EscapeInst, ExitBB);

    // The information scalar evolution has about the escaping instruction
    // needs to be revoked so the new merged instruction will be used.
    if (SE.isSCEVable(EscapeInst->getType()))
      SE.forgetValue(EscapeInst);
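    // Schematically, the merge block afterwards contains
    //   %v.merge = phi [ %v.final_reload, %optexit ], [ %v, %exit ]
    // and all escape users are rewritten below to use %v.merge.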
    // Replace all uses of the demoted instruction with the merge PHI.
    for (Instruction *EUser : EscapeUsers)
      EUser->replaceUsesOfWith(EscapeInst, MergePHI);
  }
}

void BlockGenerator::finalizeSCoP(Scop &S, ValueMapT &GlobalMap) {
  createScalarInitialization(S.getRegion(), GlobalMap);
  createScalarFinalization(S.getRegion());
}

VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
                                           VectorValueMapT &GlobalMaps,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           isl_map *Schedule)
    : BlockGenerator(BlockGen), GlobalMaps(GlobalMaps), VLTS(VLTS),
      Schedule(Schedule) {
  assert(GlobalMaps.size() > 1 && "Only one vector lane found");
  assert(Schedule && "No schedule provided");
}

Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, const Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], GlobalMaps[Lane],
                            VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}

Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps,
    bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;
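  // A stride-one access becomes a single wide load; schematically (the types
  // depend on the accessed array)
  //   %vector_ptr = bitcast double* %p to <4 x double>*
  //   %vec_full   = load <4 x double>* %vector_ptr
  // For the negative-stride case the load starts at the last lane's address
  // (Offset above) and the result is reversed with a shufflevector below.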
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, Pointer, ScalarMaps[Offset],
                               GlobalMaps[Offset], VLTS[Offset]);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

Value *VectorBlockGenerator::generateStrideZeroLoad(ScopStmt &Stmt,
                                                    const LoadInst *Load,
                                                    ValueMapT &BBMap) {
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer = generateLocationAccessed(Stmt, Load, Pointer, BBMap,
                                               GlobalMaps[0], VLTS[0]);
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

Value *VectorBlockGenerator::generateUnknownStrideLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = generateLocationAccessed(
        Stmt, Load, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(ScopStmt &Stmt, const LoadInst *Load,
                                        ValueMapT &VectorMap,
                                        VectorValueMapT &ScalarMaps) {
  if (!VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateScalarLoad(Stmt, Load, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    return;
  }

  const MemoryAccess &Access = Stmt.getAccessFor(Load);
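  // The stride of the access under the new schedule decides the code shape:
  // stride zero is one scalar load splatted over all lanes, stride one a
  // consecutive wide load, stride minus one a reversed wide load, and any
  // other stride a lane-by-lane gather of scalar loads.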
  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Load, VectorMap, ScalarMaps);

  Value *NewLoad;
  if (Access.isStrideZero(isl_map_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0]);
  else if (Access.isStrideOne(isl_map_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps);
  else if (Access.isStrideX(isl_map_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, true);
  else
    NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps);

  VectorMap[Load] = NewLoad;
}

void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt,
                                         const UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForInst(Inst));

  assert(isa<CastInst>(Inst) && "Cannot generate vector code for instruction");

  const CastInst *Cast = cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
}

void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt,
                                          const BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForInst(Inst);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(ScopStmt &Stmt, const StoreInst *Store,
                                     ValueMapT &VectorMap,
                                     VectorValueMapT &ScalarMaps) {
  const MemoryAccess &Access = Stmt.getAccessFor(Store);

  const Value *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForInst(Store));
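  // As for loads, only the stride-one case is emitted as one wide store;
  // every other stride falls back to extracting each lane and storing it
  // separately.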
  // Make sure we have scalar values available to access the pointer to
  // the data location.
  extractScalarValues(Store, VectorMap, ScalarMaps);

  if (Access.isStrideOne(isl_map_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth());
    Value *NewPointer = generateLocationAccessed(
        Stmt, Store, Pointer, ScalarMaps[0], GlobalMaps[0], VLTS[0]);

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    StoreInst *NewStore = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      NewStore->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar =
          Builder.CreateExtractElement(Vector, Builder.getInt32(i));
      Value *NewPointer = generateLocationAccessed(
          Stmt, Store, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Value *Operand : Inst->operands())
    if (VectorMap.count(Operand))
      return true;
  return false;
}

bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Value *Operand : Inst->operands()) {
    ValueMapT::iterator VecOp = VectorMap.find(Operand);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If one scalar element was already extracted, all of them have been
      // extracted by this code before, so there is no need to check for the
      // existence of the remaining ones.
      if (SM.count(Operand))
        break;

      SM[Operand] =
          Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(ScopStmt &Stmt,
                                              const Instruction *Inst,
                                              ValueMapT &VectorMap,
                                              VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand;
  int VectorWidth = getVectorWidth();

  HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < VectorWidth; VectorLane++)
    BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
                                    GlobalMaps[VectorLane], VLTS[VectorLane]);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as a vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

int VectorBlockGenerator::getVectorWidth() { return GlobalMaps.size(); }

void VectorBlockGenerator::copyInstruction(ScopStmt &Stmt,
                                           const Instruction *Inst,
                                           ValueMapT &VectorMap,
                                           VectorValueMapT &ScalarMaps) {
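  // Only loads, casts, binary operators, and stores with vector operands are
  // widened directly; any other instruction is scalarized, i.e., executed
  // once per vector lane on the extracted scalar operands.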
  // Terminator instructions control the control flow. They are explicitly
  // expressed in the isl AST and do not need to be copied.
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion()))
    return;

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    generateLoad(Stmt, Load, VectorMap, ScalarMaps);
    return;
  }

  if (hasVectorOperands(Inst, VectorMap)) {
    if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
      copyStore(Stmt, Store, VectorMap, ScalarMaps);
      return;
    }

    if (const UnaryInstruction *Unary = dyn_cast<UnaryInstruction>(Inst)) {
      copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
      return;
    }

    if (const BinaryOperator *Binary = dyn_cast<BinaryOperator>(Inst)) {
      copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
      return;
    }

    // Fallthrough: we generate scalar instructions if we do not know how to
    // generate vector code.
  }

  copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps);
}

void VectorBlockGenerator::copyStmt(ScopStmt &Stmt) {
  assert(Stmt.isBlockStmt() && "TODO: Only block statements can be copied by "
                               "the vector block generator");

  BasicBlock *BB = Stmt.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported there is one map for scalar values
  // and one for vector values.
  //
  // In case we just do scalar code generation, the vectorMap is not used and
  // the scalarMap has just one dimension, which contains the mapping.
  //
  // In case vector code generation is done, an instruction may either appear
  // in the vector map once (as it is calculating >vectorwidth< values at a
  // time), or, if the values are calculated using scalar operations, it
  // appears once in every dimension of the scalarMap.
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap);
}

BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
                                             BasicBlock *BBCopy) {

  BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
  BasicBlock *BBCopyIDom = BlockMap.lookup(BBIDom);

  if (BBCopyIDom)
    DT.changeImmediateDominator(BBCopy, BBCopyIDom);

  return BBCopyIDom;
}

void RegionGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                               LoopToScevMapT &LTS) {
  assert(Stmt.isRegionStmt() &&
         "Only region statements can be copied by the region generator");

  // Forget all old mappings.
  BlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();

  // The region represented by the statement.
  Region *R = Stmt.getRegion();

  // Create a dedicated entry for the region where we can reload all demoted
  // inputs.
  BasicBlock *EntryBB = R->getEntry();
  BasicBlock *EntryBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
  Builder.SetInsertPoint(EntryBBCopy->begin());

  for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
    if (!R->contains(*PI))
      BlockMap[*PI] = EntryBBCopy;
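  // The copy proceeds in three phases: a breadth-first walk clones every
  // block and repairs dominance, the original terminators are then recreated
  // to stitch the copied control flow together, and finally counting PHIs
  // are added to copied loops so SCEVs referring to the old induction
  // variables can be rewritten.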
  // Iterate over all blocks in the region in a breadth-first search.
  std::deque<BasicBlock *> Blocks;
  SmallPtrSet<BasicBlock *, 8> SeenBlocks;
  Blocks.push_back(EntryBB);
  SeenBlocks.insert(EntryBB);

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.front();
    Blocks.pop_front();

    // First split the block and update dominance information.
    BasicBlock *BBCopy = splitBB(BB);
    BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);

    // In order to remap PHI nodes we also store basic block mappings.
    BlockMap[BB] = BBCopy;

    // Get the mapping for this block and initialize it with the mapping
    // available at its immediate dominator (in the new region).
    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap = RegionMaps[BBCopyIDom];

    // Copy the block with the BlockGenerator.
    copyBB(Stmt, BB, BBCopy, RegionMap, GlobalMap, LTS);

    // Add values to incomplete PHI nodes waiting for this block to be copied.
    for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
      addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB,
                      GlobalMap, LTS);
    IncompletePHINodeMap[BB].clear();

    // And continue with new successors inside the region.
    for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
      if (R->contains(*SI) && SeenBlocks.insert(*SI).second)
        Blocks.push_back(*SI);
  }

  // Now create a new dedicated region exit block and add it to the region map.
  BasicBlock *ExitBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
  BlockMap[R->getExit()] = ExitBBCopy;

  repairDominance(R->getExit(), ExitBBCopy);

  // As the block generator doesn't handle control flow we need to add the
  // region control flow by hand after all blocks have been copied.
  for (BasicBlock *BB : SeenBlocks) {

    BranchInst *BI = cast<BranchInst>(BB->getTerminator());

    BasicBlock *BBCopy = BlockMap[BB];
    Instruction *BICopy = BBCopy->getTerminator();

    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap.insert(BlockMap.begin(), BlockMap.end());

    Builder.SetInsertPoint(BBCopy);
    copyInstScalar(Stmt, BI, RegionMap, GlobalMap, LTS);
    BICopy->eraseFromParent();
  }

  // Add counting PHI nodes to all loops in the region that can be used as
  // replacement for SCEVs referring to the old loop.
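  // Schematically, each copied loop header receives
  //   %polly.subregion.iv = phi i32 [ 0, ... ], [ %iv.inc, ... ]
  //   %polly.subregion.iv.inc = add i32 %polly.subregion.iv, 1
  // and the PHI is registered in LTS as the value to use for the old loop.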
  for (BasicBlock *BB : SeenBlocks) {
    Loop *L = LI.getLoopFor(BB);
    if (L == nullptr || L->getHeader() != BB)
      continue;

    BasicBlock *BBCopy = BlockMap[BB];
    Value *NullVal = Builder.getInt32(0);
    PHINode *LoopPHI =
        PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
    Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
        LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
    LoopPHI->insertBefore(BBCopy->begin());
    LoopPHIInc->insertBefore(BBCopy->getTerminator());

    for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
      if (!R->contains(PredBB))
        continue;
      if (L->contains(PredBB))
        LoopPHI->addIncoming(LoopPHIInc, BlockMap[PredBB]);
      else
        LoopPHI->addIncoming(NullVal, BlockMap[PredBB]);
    }

    for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
      if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
        LoopPHI->addIncoming(NullVal, PredBBCopy);

    LTS[L] = SE.getUnknown(LoopPHI);
  }

  // Add all mappings from the region to the global map so outside uses will
  // use the copied instructions.
  for (auto &BBMap : RegionMaps)
    GlobalMap.insert(BBMap.second.begin(), BBMap.second.end());

  // Reset the old insert point for the build.
  Builder.SetInsertPoint(ExitBBCopy->begin());
}

void RegionGenerator::generateScalarLoads(ScopStmt &Stmt,
                                          const Instruction *Inst,
                                          ValueMapT &BBMap) {

  // Inside a non-affine region PHI nodes are copied, not demoted. Once a PHI
  // is copied it will reload all inputs from outside the region, hence we do
  // not need to generate code for the read accesses of its operands.
  if (isa<PHINode>(Inst))
    return;

  return BlockGenerator::generateScalarLoads(Stmt, Inst, BBMap);
}

void RegionGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  Region *StmtR = Stmt.getRegion();
  assert(StmtR && "Block statements need to use the generateScalarStores() "
                  "function in the BlockGenerator");

  for (MemoryAccess *MA : Stmt) {

    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *ScalarBase = cast<Instruction>(MA->getBaseAddr());
    Instruction *ScalarInst = MA->getAccessInstruction();
    PHINode *ScalarBasePHI = dyn_cast<PHINode>(ScalarBase);

    Value *Val = nullptr;
    AllocaInst *ScalarAddr = nullptr;

    if (MA->getScopArrayInfo()->isPHI()) {
      int PHIIdx = ScalarBasePHI->getBasicBlockIndex(BB);
      ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
      Val = ScalarBasePHI->getIncomingValue(PHIIdx);
    } else {
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");
      Val = ScalarInst;
    }

    Val = getNewScalarValue(Val, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(Val, ScalarAddr);
  }
}

void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI,
                                      PHINode *PHICopy, BasicBlock *IncomingBB,
                                      ValueMapT &GlobalMap,
                                      LoopToScevMapT &LTS) {
  Region *StmtR = Stmt.getRegion();
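  // Two cases are handled below: an incoming block inside the region uses
  // the value copy recorded in that block's RegionMap, while an incoming
  // block outside the region reloads the operand from its ".phiops" alloca.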
  // If the incoming block was not yet copied, mark this PHI as incomplete.
  // Once the block is copied, the incoming value will be added.
  BasicBlock *BBCopy = BlockMap[IncomingBB];
  if (!BBCopy) {
    assert(StmtR->contains(IncomingBB) &&
           "Bad incoming block for PHI in non-affine region");
    IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
    return;
  }

  Value *OpCopy = nullptr;
  if (StmtR->contains(IncomingBB)) {
    assert(RegionMaps.count(BBCopy) &&
           "Incoming PHI block did not have a BBMap");
    ValueMapT &BBCopyMap = RegionMaps[BBCopy];

    Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
    OpCopy =
        getNewValue(Stmt, Op, BBCopyMap, GlobalMap, LTS, getLoopForInst(PHI));
  } else {

    if (PHICopy->getBasicBlockIndex(BBCopy) >= 0)
      return;

    AllocaInst *PHIOpAddr =
        getOrCreateAlloca(const_cast<PHINode *>(PHI), PHIOpMap, ".phiops");
    OpCopy = new LoadInst(PHIOpAddr, PHIOpAddr->getName() + ".reload",
                          BlockMap[IncomingBB]->getTerminator());
  }

  assert(OpCopy && "Incoming PHI value was not copied properly");
  assert(BBCopy && "Incoming PHI block was not copied properly");
  PHICopy->addIncoming(OpCopy, BBCopy);
}

Value *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, const PHINode *PHI,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  unsigned NumIncoming = PHI->getNumIncomingValues();
  PHINode *PHICopy =
      Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
  PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
  BBMap[PHI] = PHICopy;

  for (unsigned u = 0; u < NumIncoming; u++)
    addOperandToPHI(Stmt, PHI, PHICopy, PHI->getIncomingBlock(u), GlobalMap,
                    LTS);
  return PHICopy;
}