//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, and the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// that define a <vsrc> register and constrains the definition class to
/// <vgpr> if a user of the PHI's result is a vector instruction. If the
/// PHI's definition class is constrained to <vgpr>, the coalescer is unable
/// to perform the COPY removal shown in the example above, which is what
/// ultimately led to the creation of the illegal COPY.
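///
/// The pass also legalizes the VGPR to SGPR copies that do get created: the
/// copy and its scalar users are moved to the VALU (moveToVALU), and a copy
/// whose source is a materialized immediate is rewritten as a scalar move of
/// that immediate instead. Illustrative example of the immediate case (the
/// value 42 is a placeholder):
///
///   %0 <vgpr> = V_MOV_B32_e32 42
///   %1 <sgpr> = COPY %0 <vgpr>
///
/// becomes
///
///   %1 <sgpr> = S_MOV_B32 42
///
/// In addition, an SGPR to VGPR copy of a REG_SEQUENCE result is distributed
/// into the REG_SEQUENCE operands, and, above -O0 with
/// -amdgpu-enable-merge-m0, identical M0 initializations are hoisted and
/// merged into a common dominator.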
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <map>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVGPRs(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVGPRs(DstRC);
}

static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = Src.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg))
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  //
  // =>
  //
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}

static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getVRegDef(Reg);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_IF_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}

static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}

static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

static bool predsHasDivergentTerminator(MachineBasicBlock *MBB,
                                        const TargetRegisterInfo *TRI) {
  return searchPredecessors(MBB, nullptr, [TRI](MachineBasicBlock *MBB) {
           return hasTerminatorThatModifiesExec(*MBB, *TRI); });
}

// Checks if there is a potential path from the From instruction to the To
// instruction. If CutOff is specified and it sits on that path, we ignore the
// portion of the path above CutOff and report the instruction as not
// reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // If either From's block dominates To's block, or the instructions are in
  // the same block and From is higher.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
                            (const MachineBasicBlock *MBB) {
                              return MBB == MBBFrom; });
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr *, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check for any possible interference.
        auto intereferes = [&](MachineBasicBlock::iterator From,
                               MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr *&Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber. This is not an interference only if both are
            // dominated by Clobber and belong to the same block, or if Clobber
            // properly dominates To: given that To >> From, Clobber then
            // dominates both and sits in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!intereferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!intereferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!intereferes(MI1, I) && !intereferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << "and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::WWM: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          unsigned SrcReg = MI.getOperand(1).getReg();
          if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI, MDT);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI, MDT);
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        bool HasVGPROperand = phiHasVGPROperands(MI, MRI, TRI, TII);
        if (MI.getNumExplicitOperands() == 5 && !HasVGPROperand) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          if (!predsHasDivergentTerminator(MBB0, TRI) &&
              !predsHasDivergentTerminator(MBB1, TRI)) {
            LLVM_DEBUG(dbgs()
                       << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands,
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will never enter
        // the second block (the loop) without entering the first block
        // (where the condition is computed), so there is no chance for
        // values to be over-written.
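        //
        // In the check below, the PHI is left as an SGPR PHI only when it has
        // no VGPR operands and at least one incoming value is defined
        // (possibly through a chain of other PHIs) by SI_IF_BREAK; every
        // other SGPR-defining PHI is moved to the VALU.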

        SmallSet<unsigned, 8> Visited;
        if (HasVGPROperand || !phiHasBreakDef(MI, MRI, Visited)) {
          LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI, MDT);
        }

        break;
      }
      case AMDGPU::REG_SEQUENCE:
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI, MDT);
        break;
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}