//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BlockGenerator and VectorBlockGenerator classes,
// which generate sequential code and vectorized code for a polyhedral
// statement, respectively.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "polly/CodeGen/BlockGenerators.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslExprBuilder.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/aff.h"
#include "isl/ast.h"
#include "isl/ast_build.h"
#include "isl/set.h"
#include <deque>

using namespace llvm;
using namespace polly;

static cl::opt<bool> Aligned("enable-polly-aligned",
                             cl::desc("Assume aligned memory accesses."),
                             cl::Hidden, cl::init(false), cl::ZeroOrMore,
                             cl::cat(PollyCategory));

bool polly::canSynthesize(const Value *V, const llvm::LoopInfo *LI,
                          ScalarEvolution *SE, const Region *R) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEV(const_cast<Value *>(V)))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, R))
        return true;

  return false;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return true;
    default:
      break;
    }
  }
  return false;
}

BlockGenerator::BlockGenerator(PollyIRBuilder &B, LoopInfo &LI,
                               ScalarEvolution &SE, DominatorTree &DT,
                               ScalarAllocaMapTy &ScalarMap,
                               ScalarAllocaMapTy &PHIOpMap,
                               EscapeUsersAllocaMapTy &EscapeMap,
                               IslExprBuilder *ExprBuilder)
    : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
      EntryBB(nullptr), PHIOpMap(PHIOpMap), ScalarMap(ScalarMap),
      EscapeMap(EscapeMap) {}

Value *BlockGenerator::getNewValue(ScopStmt &Stmt, const Value *Old,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS, Loop *L) const {
  // We assume constants never change.
  // This avoids map lookups for many calls to this function.
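  // (Note that Constant also covers ConstantExprs and global values, which
  // are valid at any program point and can therefore be reused directly.)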
  if (isa<Constant>(Old))
    return const_cast<Value *>(Old);

  if (Value *New = GlobalMap.lookup(Old)) {
    if (Old->getType()->getScalarSizeInBits() <
        New->getType()->getScalarSizeInBits())
      New = Builder.CreateTruncOrBitCast(New, Old->getType());

    return New;
  }

  if (Value *New = BBMap.lookup(Old))
    return New;

  if (SE.isSCEVable(Old->getType()))
    if (const SCEV *Scev = SE.getSCEVAtScope(const_cast<Value *>(Old), L)) {
      if (!isa<SCEVCouldNotCompute>(Scev)) {
        const SCEV *NewScev = apply(Scev, LTS, SE);
        ValueToValueMap VTV;
        VTV.insert(BBMap.begin(), BBMap.end());
        VTV.insert(GlobalMap.begin(), GlobalMap.end());
        NewScev = SCEVParameterRewriter::rewrite(NewScev, SE, VTV);

        Scop &S = *Stmt.getParent();
        const DataLayout &DL =
            S.getRegion().getEntry()->getParent()->getParent()->getDataLayout();
        auto IP = Builder.GetInsertPoint();

        assert(IP != Builder.GetInsertBlock()->end() &&
               "Only instructions can be insert points for SCEVExpander");
        Value *Expanded =
            expandCodeFor(S, SE, DL, "polly", NewScev, Old->getType(), IP);

        BBMap[Old] = Expanded;
        return Expanded;
      }
    }

  // A scop-constant value defined by a global or a function parameter.
  if (isa<GlobalValue>(Old) || isa<Argument>(Old))
    return const_cast<Value *>(Old);

  // A scop-constant value defined by an instruction executed outside the scop.
  if (const Instruction *Inst = dyn_cast<Instruction>(Old))
    if (!Stmt.getParent()->getRegion().contains(Inst->getParent()))
      return const_cast<Value *>(Old);

  // The scalar dependence is neither available nor SCEVCodegenable.
  llvm_unreachable("Unexpected scalar dependence in region!");
  return nullptr;
}

void BlockGenerator::copyInstScalar(ScopStmt &Stmt, const Instruction *Inst,
                                    ValueMapT &BBMap, ValueMapT &GlobalMap,
                                    LoopToScevMapT &LTS) {
  // We do not generate debug intrinsics as we did not investigate how to
  // copy them correctly. In the current state they just crash the code
  // generation as the metadata operands are not correctly copied.
  if (isa<DbgInfoIntrinsic>(Inst))
    return;

  Instruction *NewInst = Inst->clone();

  // Replace old operands with the new ones.
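  // For example, when copying "%sum = add i32 %a, %b", both %a and %b are
  // remapped to their copies in the generated block (or synthesized from
  // SCEV if possible) before the cloned add is inserted.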
  for (Value *OldOperand : Inst->operands()) {
    Value *NewOperand = getNewValue(Stmt, OldOperand, BBMap, GlobalMap, LTS,
                                    getLoopForInst(Inst));

    if (!NewOperand) {
      assert(!isa<StoreInst>(NewInst) &&
             "Store instructions are always needed!");
      delete NewInst;
      return;
    }

    NewInst->replaceUsesOfWith(OldOperand, NewOperand);
  }

  Builder.Insert(NewInst);
  BBMap[Inst] = NewInst;

  if (!NewInst->getType()->isVoidTy())
    NewInst->setName("p_" + Inst->getName());
}

Value *BlockGenerator::getNewAccessOperand(ScopStmt &Stmt,
                                           const MemoryAccess &MA) {
  isl_pw_multi_aff *PWAccRel;
  isl_union_map *Schedule;
  isl_ast_expr *Expr;
  isl_ast_build *Build = Stmt.getAstBuild();

  assert(ExprBuilder && Build &&
         "Cannot generate new value without IslExprBuilder!");

  Schedule = isl_ast_build_get_schedule(Build);
  PWAccRel = MA.applyScheduleToAccessRelation(Schedule);

  Expr = isl_ast_build_access_from_pw_multi_aff(Build, PWAccRel);
  Expr = isl_ast_expr_address_of(Expr);

  return ExprBuilder->create(Expr);
}

Value *BlockGenerator::generateLocationAccessed(
    ScopStmt &Stmt, const Instruction *Inst, const Value *Pointer,
    ValueMapT &BBMap, ValueMapT &GlobalMap, LoopToScevMapT &LTS) {
  const MemoryAccess &MA = Stmt.getAccessFor(Inst);

  Value *NewPointer;
  if (MA.hasNewAccessRelation())
    NewPointer = getNewAccessOperand(Stmt, MA);
  else
    NewPointer =
        getNewValue(Stmt, Pointer, BBMap, GlobalMap, LTS, getLoopForInst(Inst));

  return NewPointer;
}

Loop *BlockGenerator::getLoopForInst(const llvm::Instruction *Inst) {
  return LI.getLoopFor(Inst->getParent());
}

Value *BlockGenerator::generateScalarLoad(ScopStmt &Stmt, const LoadInst *Load,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap,
                                          LoopToScevMapT &LTS) {
  const Value *Pointer = Load->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Load, Pointer, BBMap, GlobalMap, LTS);
  Value *ScalarLoad = Builder.CreateAlignedLoad(
      NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_");
  return ScalarLoad;
}

void BlockGenerator::generateScalarStore(ScopStmt &Stmt, const StoreInst *Store,
                                         ValueMapT &BBMap, ValueMapT &GlobalMap,
                                         LoopToScevMapT &LTS) {
  const Value *Pointer = Store->getPointerOperand();
  Value *NewPointer =
      generateLocationAccessed(Stmt, Store, Pointer, BBMap, GlobalMap, LTS);
  Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
                                    GlobalMap, LTS, getLoopForInst(Store));

  Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlignment());
}

void BlockGenerator::copyInstruction(ScopStmt &Stmt, const Instruction *Inst,
                                     ValueMapT &BBMap, ValueMapT &GlobalMap,
                                     LoopToScevMapT &LTS) {

  // First check for possible scalar dependences for this instruction.
  generateScalarLoads(Stmt, Inst, BBMap);

  // Terminator instructions control the flow of execution; they are
  // explicitly expressed in the clast and do not need to be copied.
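  // For example, the branch at the end of a statement's basic block is
  // recreated when the generated AST is lowered to IR; copying it here would
  // introduce duplicated control flow.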
  if (Inst->isTerminator())
    return;

  Loop *L = getLoopForInst(Inst);
  if ((Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
      canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion())) {
    Value *NewValue = getNewValue(Stmt, Inst, BBMap, GlobalMap, LTS, L);
    BBMap[Inst] = NewValue;
    return;
  }

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    Value *NewLoad = generateScalarLoad(Stmt, Load, BBMap, GlobalMap, LTS);
    // Compute NewLoad before its insertion in BBMap to make the insertion
    // deterministic.
    BBMap[Load] = NewLoad;
    return;
  }

  if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
    generateScalarStore(Stmt, Store, BBMap, GlobalMap, LTS);
    return;
  }

  if (const PHINode *PHI = dyn_cast<PHINode>(Inst)) {
    copyPHIInstruction(Stmt, PHI, BBMap, GlobalMap, LTS);
    return;
  }

  // Skip some special intrinsics for which we do not adjust the semantics to
  // the new schedule. All others are handled like every other instruction.
  if (auto *IT = dyn_cast<IntrinsicInst>(Inst)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    case llvm::Intrinsic::expect:
      return;
    default:
      // Other intrinsics are copied.
      break;
    }
  }

  copyInstScalar(Stmt, Inst, BBMap, GlobalMap, LTS);
}

void BlockGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                              LoopToScevMapT &LTS) {
  assert(Stmt.isBlockStmt() &&
         "Only block statements can be copied by the block generator");

  ValueMapT BBMap;

  BasicBlock *BB = Stmt.getBasicBlock();
  copyBB(Stmt, BB, BBMap, GlobalMap, LTS);
}

BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  return CopyBB;
}

BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
                                   ValueMapT &BBMap, ValueMapT &GlobalMap,
                                   LoopToScevMapT &LTS) {
  BasicBlock *CopyBB = splitBB(BB);
  copyBB(Stmt, BB, CopyBB, BBMap, GlobalMap, LTS);
  return CopyBB;
}

void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
                            ValueMapT &BBMap, ValueMapT &GlobalMap,
                            LoopToScevMapT &LTS) {
  Builder.SetInsertPoint(CopyBB->begin());
  EntryBB = &CopyBB->getParent()->getEntryBlock();

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, BBMap, GlobalMap, LTS);

  // After a basic block was copied, store all scalars that escape this block
  // in their alloca: first the scalars that have dependences inside the SCoP,
  // then the ones that might escape the SCoP.
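  // For example, a value defined in this block but used by a later statement
  // was demoted to a ".s2a" alloca; the copy computed here must be written
  // back so the later ".reload" observes it.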
  generateScalarStores(Stmt, BB, BBMap, GlobalMap);

  const Region &R = Stmt.getParent()->getRegion();
  for (Instruction &Inst : *BB)
    handleOutsideUsers(R, &Inst, BBMap[&Inst]);
}

AllocaInst *BlockGenerator::getOrCreateAlloca(Value *ScalarBase,
                                              ScalarAllocaMapTy &Map,
                                              const char *NameExt,
                                              bool *IsNew) {

  // Check if an alloca was cached for the base instruction.
  AllocaInst *&Addr = Map[ScalarBase];

  // If requested, indicate whether the alloca was found or will be created.
  if (IsNew)
    *IsNew = (Addr == nullptr);

  // If no alloca was found, create one and insert it in the entry block.
  if (!Addr) {
    auto *Ty = ScalarBase->getType();
    Addr = new AllocaInst(Ty, ScalarBase->getName() + NameExt);
    Addr->insertBefore(EntryBB->getFirstInsertionPt());
  }

  return Addr;
}

void BlockGenerator::handleOutsideUsers(const Region &R, Instruction *Inst,
                                        Value *InstCopy) {
  // If there are escape users we get the alloca for this instruction and put
  // it in the EscapeMap for later finalization. However, if the alloca was
  // not created by an already handled scalar dependence we also have to
  // initialize it. Lastly, if the instruction was copied multiple times we
  // already did this and can exit.
  if (EscapeMap.count(Inst))
    return;

  EscapeUserVectorTy EscapeUsers;
  for (User *U : Inst->users()) {

    // A non-instruction user will never escape.
    Instruction *UI = dyn_cast<Instruction>(U);
    if (!UI)
      continue;

    if (R.contains(UI))
      continue;

    EscapeUsers.push_back(UI);
  }

  // Exit if no escape uses were found.
  if (EscapeUsers.empty())
    return;

  // Get or create an escape alloca for this instruction.
  bool IsNew;
  AllocaInst *ScalarAddr =
      getOrCreateAlloca(Inst, ScalarMap, ".escape", &IsNew);

  // Remember that this instruction has escape uses and the escape alloca.
  EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));

  // If the escape alloca was just created, store the instruction in it;
  // otherwise that has already happened.
  if (IsNew) {
    assert(InstCopy &&
           "Except for PHIs, every instruction should have a copy!");
    Builder.CreateStore(InstCopy, ScalarAddr);
  }
}

void BlockGenerator::generateScalarLoads(ScopStmt &Stmt,
                                         const Instruction *Inst,
                                         ValueMapT &BBMap) {
  auto *MAL = Stmt.lookupAccessesFor(Inst);

  if (!MAL)
    return;

  for (MemoryAccess &MA : *MAL) {
    AllocaInst *Address;
    if (!MA.isScalar() || !MA.isRead())
      continue;

    auto Base = MA.getBaseAddr();

    if (MA.getScopArrayInfo()->isPHI())
      Address = getOrCreateAlloca(Base, PHIOpMap, ".phiops");
    else
      Address = getOrCreateAlloca(Base, ScalarMap, ".s2a");

    BBMap[Base] = Builder.CreateLoad(Address, Address->getName() + ".reload");
  }
}

Value *BlockGenerator::getNewScalarValue(Value *ScalarValue, const Region &R,
                                         ScalarAllocaMapTy &ReloadMap,
                                         ValueMapT &BBMap,
                                         ValueMapT &GlobalMap) {
  // If the value we want to store is an instruction, we might have demoted
  // it to make it accessible here. In such a case a reload is necessary. If
  // it is not an instruction, it is always a value that dominates the
  // current point and we can just use it. In total there are four options:
  // (1) The value is not an instruction ==> use the value.
  // (2) The value is an instruction that was split out of the region prior to
  //     code generation ==> use the instruction as it dominates the region.
  // (3) The value is an instruction:
  //     (a) The value was defined in the current block, thus a copy is in
  //         the BBMap ==> use the mapped value.
  //     (b) The value was defined in a previous block, thus we demoted it
  //         earlier ==> use the reloaded value.
  Instruction *ScalarValueInst = dyn_cast<Instruction>(ScalarValue);
  if (!ScalarValueInst)
    return ScalarValue;

  if (!R.contains(ScalarValueInst)) {
    if (Value *ScalarValueCopy = GlobalMap.lookup(ScalarValueInst))
      return /* Case (3a) */ ScalarValueCopy;
    else
      return /* Case (2) */ ScalarValue;
  }

  if (Value *ScalarValueCopy = BBMap.lookup(ScalarValueInst))
    return /* Case (3a) */ ScalarValueCopy;

  // Case (3b)
  Value *ReloadAddr = getOrCreateAlloca(ScalarValueInst, ReloadMap, ".s2a");
  ScalarValue =
      Builder.CreateLoad(ReloadAddr, ReloadAddr->getName() + ".reload");

  return ScalarValue;
}

void BlockGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                          ValueMapT &BBMap,
                                          ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.isBlockStmt() && BB == Stmt.getBasicBlock() &&
         "Region statements need to use the generateScalarStores() "
         "function in the RegionGenerator");

  for (MemoryAccess *MA : Stmt) {
    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *Base = cast<Instruction>(MA->getBaseAddr());
    Value *Val = MA->getAccessValue();

    AllocaInst *Address = nullptr;
    if (MA->getScopArrayInfo()->isPHI())
      Address = getOrCreateAlloca(Base, PHIOpMap, ".phiops");
    else
      Address = getOrCreateAlloca(Base, ScalarMap, ".s2a");

    Val = getNewScalarValue(Val, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(Val, Address);
  }
}

void BlockGenerator::createScalarInitialization(Region &R,
                                                ValueMapT &GlobalMap) {
  // The split block __just before__ the region and the optimized region.
  BasicBlock *SplitBB = R.getEnteringBlock();
  BranchInst *SplitBBTerm = cast<BranchInst>(SplitBB->getTerminator());
  assert(SplitBBTerm->getNumSuccessors() == 2 && "Bad region entering block!");

  // Get the start block of the __optimized__ region.
  BasicBlock *StartBB = SplitBBTerm->getSuccessor(0);
  if (StartBB == R.getEntry())
    StartBB = SplitBBTerm->getSuccessor(1);

  // For each PHI predecessor outside the region, store the incoming operand
  // value prior to entering the optimized region.
  Builder.SetInsertPoint(StartBB->getTerminator());

  ScalarAllocaMapTy EmptyMap;
  for (const auto &PHIOpMapping : PHIOpMap) {
    const PHINode *PHI = cast<PHINode>(PHIOpMapping.getFirst());

    // Check if this PHI has the split block as predecessor (that is the only
    // possible predecessor outside the SCoP).
    int idx = PHI->getBasicBlockIndex(SplitBB);
    if (idx < 0)
      continue;

    Value *ScalarValue = PHI->getIncomingValue(idx);
    ScalarValue =
        getNewScalarValue(ScalarValue, R, EmptyMap, GlobalMap, GlobalMap);

    // If the split block is the predecessor, initialize the PHI operand
    // alloca.
    Builder.CreateStore(ScalarValue, PHIOpMapping.getSecond());
  }
}

void BlockGenerator::createScalarFinalization(Region &R) {
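  // At this point the CFG contains two versions of the SCoP, the original
  // region and the optimized one, which join again in a common merge block.
  // Each escaping value therefore needs a PHI in the merge block that selects
  // between the original instruction and its final reload, roughly (block
  // names are illustrative only):
  //
  //   %x.merge = phi [ %x.final_reload, %polly.exit ], [ %x, %orig.exit ]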
  // The exit block of the __unoptimized__ region.
  BasicBlock *ExitBB = R.getExitingBlock();
  // The merge block __just after__ the region and the optimized region.
  BasicBlock *MergeBB = R.getExit();

  // The exit block of the __optimized__ region.
  BasicBlock *OptExitBB = *(pred_begin(MergeBB));
  if (OptExitBB == ExitBB)
    OptExitBB = *(++pred_begin(MergeBB));

  Builder.SetInsertPoint(OptExitBB->getTerminator());
  for (const auto &EscapeMapping : EscapeMap) {
    // Extract the escaping instruction and the escaping users as well as the
    // alloca the instruction was demoted to.
    Instruction *EscapeInst = EscapeMapping.getFirst();
    const auto &EscapeMappingValue = EscapeMapping.getSecond();
    const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
    AllocaInst *ScalarAddr = EscapeMappingValue.first;

    // Reload the demoted instruction in the optimized version of the SCoP.
    Instruction *EscapeInstReload =
        Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload");

    // Create the merge PHI that merges the optimized and unoptimized versions.
    PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
                                        EscapeInst->getName() + ".merge");
    MergePHI->insertBefore(MergeBB->getFirstInsertionPt());

    // Add the respective values to the merge PHI.
    MergePHI->addIncoming(EscapeInstReload, OptExitBB);
    MergePHI->addIncoming(EscapeInst, ExitBB);

    // ScalarEvolution's information about the escaping instruction needs to
    // be revoked so the new merge PHI will be used.
    if (SE.isSCEVable(EscapeInst->getType()))
      SE.forgetValue(EscapeInst);

    // Replace all uses of the demoted instruction with the merge PHI.
    for (Instruction *EUser : EscapeUsers)
      EUser->replaceUsesOfWith(EscapeInst, MergePHI);
  }
}

void BlockGenerator::finalizeSCoP(Scop &S, ValueMapT &GlobalMap) {
  createScalarInitialization(S.getRegion(), GlobalMap);
  createScalarFinalization(S.getRegion());
}

VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
                                           VectorValueMapT &GlobalMaps,
                                           std::vector<LoopToScevMapT> &VLTS,
                                           isl_map *Schedule)
    : BlockGenerator(BlockGen), GlobalMaps(GlobalMaps), VLTS(VLTS),
      Schedule(Schedule) {
  assert(GlobalMaps.size() > 1 && "Only one vector lane found");
  assert(Schedule && "No statement schedule provided");
}

Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, const Value *Old,
                                            ValueMapT &VectorMap,
                                            VectorValueMapT &ScalarMaps,
                                            Loop *L) {
  if (Value *NewValue = VectorMap.lookup(Old))
    return NewValue;

  int Width = getVectorWidth();

  Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width));

  for (int Lane = 0; Lane < Width; Lane++)
    Vector = Builder.CreateInsertElement(
        Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], GlobalMaps[Lane],
                            VLTS[Lane], L),
        Builder.getInt32(Lane));

  VectorMap[Old] = Vector;

  return Vector;
}

Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) {
  PointerType *PointerTy = dyn_cast<PointerType>(Val->getType());
  assert(PointerTy && "PointerType expected");

  Type *ScalarType = PointerTy->getElementType();
  VectorType *VectorType = VectorType::get(ScalarType, Width);

  return PointerType::getUnqual(VectorType);
}

Value *VectorBlockGenerator::generateStrideOneLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps,
    bool NegativeStride = false) {
  unsigned VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth);
  unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;

  Value *NewPointer = nullptr;
  NewPointer = generateLocationAccessed(Stmt, Load, Pointer,
                                        ScalarMaps[Offset], GlobalMaps[Offset],
                                        VLTS[Offset]);
  Value *VectorPtr =
      Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
  LoadInst *VecLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full");
  if (!Aligned)
    VecLoad->setAlignment(8);

  if (NegativeStride) {
    SmallVector<Constant *, 16> Indices;
    for (int i = VectorWidth - 1; i >= 0; i--)
      Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
    Constant *SV = llvm::ConstantVector::get(Indices);
    Value *RevVecLoad = Builder.CreateShuffleVector(
        VecLoad, VecLoad, SV, Load->getName() + "_reverse");
    return RevVecLoad;
  }

  return VecLoad;
}

Value *VectorBlockGenerator::generateStrideZeroLoad(ScopStmt &Stmt,
                                                    const LoadInst *Load,
                                                    ValueMapT &BBMap) {
  const Value *Pointer = Load->getPointerOperand();
  Type *VectorPtrType = getVectorPtrTy(Pointer, 1);
  Value *NewPointer = generateLocationAccessed(Stmt, Load, Pointer, BBMap,
                                               GlobalMaps[0], VLTS[0]);
  Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
                                           Load->getName() + "_p_vec_p");
  LoadInst *ScalarLoad =
      Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one");

  if (!Aligned)
    ScalarLoad->setAlignment(8);

  Constant *SplatVector = Constant::getNullValue(
      VectorType::get(Builder.getInt32Ty(), getVectorWidth()));

  Value *VectorLoad = Builder.CreateShuffleVector(
      ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
  return VectorLoad;
}

Value *VectorBlockGenerator::generateUnknownStrideLoad(
    ScopStmt &Stmt, const LoadInst *Load, VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  const Value *Pointer = Load->getPointerOperand();
  VectorType *VectorType = VectorType::get(
      dyn_cast<PointerType>(Pointer->getType())->getElementType(),
      VectorWidth);

  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++) {
    Value *NewPointer = generateLocationAccessed(
        Stmt, Load, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
    Value *ScalarLoad =
        Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_");
    Vector = Builder.CreateInsertElement(
        Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
  }

  return Vector;
}

void VectorBlockGenerator::generateLoad(ScopStmt &Stmt, const LoadInst *Load,
                                        ValueMapT &VectorMap,
                                        VectorValueMapT &ScalarMaps) {
  if (!VectorType::isValidElementType(Load->getType())) {
    for (int i = 0; i < getVectorWidth(); i++)
      ScalarMaps[i][Load] =
          generateScalarLoad(Stmt, Load, ScalarMaps[i], GlobalMaps[i],
                             VLTS[i]);
    return;
  }

  const MemoryAccess &Access = Stmt.getAccessFor(Load);

  // Make sure we have scalar values available to access the pointer to
  // the data location.
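  // For an indirect access such as A[B[i]], the values of B[i] may only be
  // available as a vector; they are extracted back to one scalar per lane so
  // that the per-lane addresses can be computed.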
  extractScalarValues(Load, VectorMap, ScalarMaps);

  Value *NewLoad;
  if (Access.isStrideZero(isl_map_copy(Schedule)))
    NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0]);
  else if (Access.isStrideOne(isl_map_copy(Schedule)))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps);
  else if (Access.isStrideX(isl_map_copy(Schedule), -1))
    NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, true);
  else
    NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps);

  VectorMap[Load] = NewLoad;
}

void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt,
                                         const UnaryInstruction *Inst,
                                         ValueMapT &VectorMap,
                                         VectorValueMapT &ScalarMaps) {
  int VectorWidth = getVectorWidth();
  Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                     ScalarMaps, getLoopForInst(Inst));

  assert(isa<CastInst>(Inst) && "Cannot generate vector code for instruction");

  const CastInst *Cast = dyn_cast<CastInst>(Inst);
  VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth);
  VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand,
                                       DestType);
}

void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt,
                                          const BinaryOperator *Inst,
                                          ValueMapT &VectorMap,
                                          VectorValueMapT &ScalarMaps) {
  Loop *L = getLoopForInst(Inst);
  Value *OpZero = Inst->getOperand(0);
  Value *OpOne = Inst->getOperand(1);

  Value *NewOpZero, *NewOpOne;
  NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
  NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);

  Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
                                       Inst->getName() + "p_vec");
  VectorMap[Inst] = NewInst;
}

void VectorBlockGenerator::copyStore(ScopStmt &Stmt, const StoreInst *Store,
                                     ValueMapT &VectorMap,
                                     VectorValueMapT &ScalarMaps) {
  const MemoryAccess &Access = Stmt.getAccessFor(Store);

  const Value *Pointer = Store->getPointerOperand();
  Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
                                 ScalarMaps, getLoopForInst(Store));

  // Make sure we have scalar values available to access the pointer to
  // the data location.
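  // Below, a stride-one access is written with a single wide store, while
  // all other strides fall back to per-lane extracts and scalar stores.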
  extractScalarValues(Store, VectorMap, ScalarMaps);

  if (Access.isStrideOne(isl_map_copy(Schedule))) {
    Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth());
    Value *NewPointer = generateLocationAccessed(
        Stmt, Store, Pointer, ScalarMaps[0], GlobalMaps[0], VLTS[0]);

    Value *VectorPtr =
        Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
    StoreInst *NewStore = Builder.CreateStore(Vector, VectorPtr);

    if (!Aligned)
      NewStore->setAlignment(8);
  } else {
    for (unsigned i = 0; i < ScalarMaps.size(); i++) {
      Value *Scalar = Builder.CreateExtractElement(Vector,
                                                   Builder.getInt32(i));
      Value *NewPointer = generateLocationAccessed(
          Stmt, Store, Pointer, ScalarMaps[i], GlobalMaps[i], VLTS[i]);
      Builder.CreateStore(Scalar, NewPointer);
    }
  }
}

bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
                                             ValueMapT &VectorMap) {
  for (Value *Operand : Inst->operands())
    if (VectorMap.count(Operand))
      return true;
  return false;
}

bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
                                               ValueMapT &VectorMap,
                                               VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand = false;
  int VectorWidth = getVectorWidth();

  for (Value *Operand : Inst->operands()) {
    ValueMapT::iterator VecOp = VectorMap.find(Operand);

    if (VecOp == VectorMap.end())
      continue;

    HasVectorOperand = true;
    Value *NewVector = VecOp->second;

    for (int i = 0; i < VectorWidth; ++i) {
      ValueMapT &SM = ScalarMaps[i];

      // If one scalar element was extracted, all of them have already been
      // extracted by this code, so there is no need to check for the
      // existence of each one.
      if (SM.count(Operand))
        break;

      SM[Operand] =
          Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
    }
  }

  return HasVectorOperand;
}

void VectorBlockGenerator::copyInstScalarized(ScopStmt &Stmt,
                                              const Instruction *Inst,
                                              ValueMapT &VectorMap,
                                              VectorValueMapT &ScalarMaps) {
  bool HasVectorOperand;
  int VectorWidth = getVectorWidth();

  HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);

  for (int VectorLane = 0; VectorLane < getVectorWidth(); VectorLane++)
    BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
                                    GlobalMaps[VectorLane], VLTS[VectorLane]);

  if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
    return;

  // Make the result available as a vector value.
  VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth);
  Value *Vector = UndefValue::get(VectorType);

  for (int i = 0; i < VectorWidth; i++)
    Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
                                         Builder.getInt32(i));

  VectorMap[Inst] = Vector;
}

int VectorBlockGenerator::getVectorWidth() { return GlobalMaps.size(); }

void VectorBlockGenerator::copyInstruction(ScopStmt &Stmt,
                                           const Instruction *Inst,
                                           ValueMapT &VectorMap,
                                           VectorValueMapT &ScalarMaps) {
  // Terminator instructions control the flow of execution; they are
  // explicitly expressed in the clast and do not need to be copied.
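  // Values that can be synthesized (checked below) are not copied either;
  // they are recreated on demand by getVectorValue/getNewValue when they
  // appear as operands of a copied instruction.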
  if (Inst->isTerminator())
    return;

  if (canSynthesize(Inst, &LI, &SE, &Stmt.getParent()->getRegion()))
    return;

  if (const LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
    generateLoad(Stmt, Load, VectorMap, ScalarMaps);
    return;
  }

  if (hasVectorOperands(Inst, VectorMap)) {
    if (const StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
      copyStore(Stmt, Store, VectorMap, ScalarMaps);
      return;
    }

    if (const UnaryInstruction *Unary = dyn_cast<UnaryInstruction>(Inst)) {
      copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
      return;
    }

    if (const BinaryOperator *Binary = dyn_cast<BinaryOperator>(Inst)) {
      copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
      return;
    }

    // Fallthrough: we generate scalar instructions if we don't know how to
    // generate vector code.
  }

  copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps);
}

void VectorBlockGenerator::copyStmt(ScopStmt &Stmt) {
  assert(Stmt.isBlockStmt() && "TODO: Only block statements can be copied by "
                               "the vector block generator");

  BasicBlock *BB = Stmt.getBasicBlock();
  BasicBlock *CopyBB =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  CopyBB->setName("polly.stmt." + BB->getName());
  Builder.SetInsertPoint(CopyBB->begin());

  // Create two maps that store the mapping from the original instructions of
  // the old basic block to their copies in the new basic block. Those maps
  // are basic block local.
  //
  // As vector code generation is supported there is one map for scalar values
  // and one for vector values.
  //
  // In case we just do scalar code generation, the vectorMap is not used and
  // the scalarMap has just one dimension, which contains the mapping.
  //
  // In case vector code generation is done, an instruction may either appear
  // once in the vectorMap (as it computes <vectorwidth> values at a time) or,
  // if the values are computed using scalar operations, once in every
  // dimension of the scalarMap.
  VectorValueMapT ScalarBlockMap(getVectorWidth());
  ValueMapT VectorBlockMap;

  for (Instruction &Inst : *BB)
    copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap);
}

BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
                                             BasicBlock *BBCopy) {

  BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
  BasicBlock *BBCopyIDom = BlockMap.lookup(BBIDom);

  if (BBCopyIDom)
    DT.changeImmediateDominator(BBCopy, BBCopyIDom);

  return BBCopyIDom;
}

void RegionGenerator::copyStmt(ScopStmt &Stmt, ValueMapT &GlobalMap,
                               LoopToScevMapT &LTS) {
  assert(Stmt.isRegionStmt() &&
         "Only region statements can be copied by the region generator");

  // Forget all old mappings.
  BlockMap.clear();
  RegionMaps.clear();
  IncompletePHINodeMap.clear();

  // The region represented by the statement.
  Region *R = Stmt.getRegion();

  // Create a dedicated entry for the region where we can reload all demoted
  // inputs.
  BasicBlock *EntryBB = R->getEntry();
  BasicBlock *EntryBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
  Builder.SetInsertPoint(EntryBBCopy->begin());

  for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
    if (!R->contains(*PI))
      BlockMap[*PI] = EntryBBCopy;

  // Iterate over all blocks in the region in a breadth-first search.
  std::deque<BasicBlock *> Blocks;
  SmallPtrSet<BasicBlock *, 8> SeenBlocks;
  Blocks.push_back(EntryBB);
  SeenBlocks.insert(EntryBB);

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.front();
    Blocks.pop_front();

    // First split the block and update dominance information.
    BasicBlock *BBCopy = splitBB(BB);
    BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);

    // In order to remap PHI nodes we also store basic block mappings.
    BlockMap[BB] = BBCopy;

    // Get the mapping for this block and initialize it with the mapping
    // available at its immediate dominator (in the new region). Copy the
    // dominator's map into a local first, as operator[] may insert into
    // RegionMaps and invalidate references into it.
    ValueMapT IDomMap = RegionMaps[BBCopyIDom];
    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap = std::move(IDomMap);

    // Copy the block with the BlockGenerator.
    copyBB(Stmt, BB, BBCopy, RegionMap, GlobalMap, LTS);

    // Add values to incomplete PHI nodes waiting for this block to be copied.
    for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
      addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB,
                      GlobalMap, LTS);
    IncompletePHINodeMap[BB].clear();

    // And continue with new successors inside the region.
    for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
      if (R->contains(*SI) && SeenBlocks.insert(*SI).second)
        Blocks.push_back(*SI);
  }

  // Now create a new dedicated region exit block and add it to the region map.
  BasicBlock *ExitBBCopy =
      SplitBlock(Builder.GetInsertBlock(), Builder.GetInsertPoint(), &DT, &LI);
  ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
  BlockMap[R->getExit()] = ExitBBCopy;

  repairDominance(R->getExit(), ExitBBCopy);

  // As the block generator doesn't handle control flow we need to add the
  // region control flow by hand after all blocks have been copied.
  for (BasicBlock *BB : SeenBlocks) {

    BranchInst *BI = cast<BranchInst>(BB->getTerminator());

    BasicBlock *BBCopy = BlockMap[BB];
    Instruction *BICopy = BBCopy->getTerminator();

    ValueMapT &RegionMap = RegionMaps[BBCopy];
    RegionMap.insert(BlockMap.begin(), BlockMap.end());

    Builder.SetInsertPoint(BICopy);
    copyInstScalar(Stmt, BI, RegionMap, GlobalMap, LTS);
    BICopy->eraseFromParent();
  }

  // Add counting PHI nodes to all loops in the region that can be used as
  // replacement for SCEVs referring to the old loop.
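  // Each such PHI starts at 0 on the edges entering the copied loop and is
  // incremented by 1 on every back edge, i.e., it counts the iterations of
  // the new loop. Mapping the old loop to this value in LTS allows SCEV-based
  // code generation to rewrite expressions of the old induction variable in
  // terms of the new counter.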
  for (BasicBlock *BB : SeenBlocks) {
    Loop *L = LI.getLoopFor(BB);
    if (L == nullptr || L->getHeader() != BB)
      continue;

    BasicBlock *BBCopy = BlockMap[BB];
    Value *NullVal = Builder.getInt32(0);
    PHINode *LoopPHI =
        PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
    Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
        LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
    LoopPHI->insertBefore(BBCopy->begin());
    LoopPHIInc->insertBefore(BBCopy->getTerminator());

    for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
      if (!R->contains(PredBB))
        continue;
      if (L->contains(PredBB))
        LoopPHI->addIncoming(LoopPHIInc, BlockMap[PredBB]);
      else
        LoopPHI->addIncoming(NullVal, BlockMap[PredBB]);
    }

    for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
      if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
        LoopPHI->addIncoming(NullVal, PredBBCopy);

    LTS[L] = SE.getUnknown(LoopPHI);
  }

  // Add all mappings from the region to the global map so outside uses will
  // use the copied instructions.
  for (auto &BBMap : RegionMaps)
    GlobalMap.insert(BBMap.second.begin(), BBMap.second.end());

  // Reset the old insert point for the build.
  Builder.SetInsertPoint(ExitBBCopy->begin());
}

void RegionGenerator::generateScalarLoads(ScopStmt &Stmt,
                                          const Instruction *Inst,
                                          ValueMapT &BBMap) {

  // Inside a non-affine region PHI nodes are copied, not demoted. Once the
  // PHI is copied it will reload all inputs from outside the region, hence
  // we do not need to generate code for the read accesses of its operands.
  if (isa<PHINode>(Inst))
    return;

  return BlockGenerator::generateScalarLoads(Stmt, Inst, BBMap);
}

void RegionGenerator::generateScalarStores(ScopStmt &Stmt, BasicBlock *BB,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap) {
  const Region &R = Stmt.getParent()->getRegion();

  assert(Stmt.getRegion() &&
         "Block statements need to use the generateScalarStores() "
         "function in the BlockGenerator");

  for (MemoryAccess *MA : Stmt) {

    if (!MA->isScalar() || MA->isRead())
      continue;

    Instruction *ScalarBase = cast<Instruction>(MA->getBaseAddr());
    Instruction *ScalarInst = MA->getAccessInstruction();

    // Only generate accesses that belong to this basic block.
    if (ScalarInst->getParent() != BB)
      continue;

    Value *Val = MA->getAccessValue();
    AllocaInst *ScalarAddr = nullptr;

    if (MA->getScopArrayInfo()->isPHI())
      ScalarAddr = getOrCreateAlloca(ScalarBase, PHIOpMap, ".phiops");
    else
      ScalarAddr = getOrCreateAlloca(ScalarBase, ScalarMap, ".s2a");

    Val = getNewScalarValue(Val, R, ScalarMap, BBMap, GlobalMap);
    Builder.CreateStore(Val, ScalarAddr);
  }
}

void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, const PHINode *PHI,
                                      PHINode *PHICopy, BasicBlock *IncomingBB,
                                      ValueMapT &GlobalMap,
                                      LoopToScevMapT &LTS) {
  Region *StmtR = Stmt.getRegion();

  // If the incoming block has not yet been copied, mark this PHI as
  // incomplete. Once the block is copied, the incoming value will be added.
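  // (This happens, e.g., for back edges: blocks are visited breadth-first,
  // so a loop header's PHI is copied before the latch feeding it.)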
  BasicBlock *BBCopy = BlockMap[IncomingBB];
  if (!BBCopy) {
    assert(StmtR->contains(IncomingBB) &&
           "Bad incoming block for PHI in non-affine region");
    IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
    return;
  }

  Value *OpCopy = nullptr;
  if (StmtR->contains(IncomingBB)) {
    assert(RegionMaps.count(BBCopy) &&
           "Incoming PHI block did not have a BBMap");
    ValueMapT &BBCopyMap = RegionMaps[BBCopy];

    Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
    OpCopy =
        getNewValue(Stmt, Op, BBCopyMap, GlobalMap, LTS, getLoopForInst(PHI));
  } else {

    if (PHICopy->getBasicBlockIndex(BBCopy) >= 0)
      return;

    AllocaInst *PHIOpAddr =
        getOrCreateAlloca(const_cast<PHINode *>(PHI), PHIOpMap, ".phiops");
    OpCopy = new LoadInst(PHIOpAddr, PHIOpAddr->getName() + ".reload",
                          BlockMap[IncomingBB]->getTerminator());
  }

  assert(OpCopy && "Incoming PHI value was not copied properly");
  assert(BBCopy && "Incoming PHI block was not copied properly");
  PHICopy->addIncoming(OpCopy, BBCopy);
}

Value *RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, const PHINode *PHI,
                                           ValueMapT &BBMap,
                                           ValueMapT &GlobalMap,
                                           LoopToScevMapT &LTS) {
  unsigned NumIncoming = PHI->getNumIncomingValues();
  PHINode *PHICopy =
      Builder.CreatePHI(PHI->getType(), NumIncoming,
                        "polly." + PHI->getName());
  PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
  BBMap[PHI] = PHICopy;

  for (unsigned u = 0; u < NumIncoming; u++)
    addOperandToPHI(Stmt, PHI, PHICopy, PHI->getIncomingBlock(u), GlobalMap,
                    LTS);
  return PHICopy;
}