//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned Opc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
      return false;
    }

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
                    << " operand " << OpNo << "\n " << *MI << '\n');
  FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of operands might be an Imm operand, and OpNo may refer to it after
    // the call of commuteInstruction() below. Such situations are avoided
    // here explicitly as OpNo must be a register operand to be a candidate
    // for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ?
          CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
    return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
    const SIRegisterInfo &SRI = TII->getRegisterInfo();

    // Fine if the operand can be encoded as an inline constant
    if (OpToFold->isImm()) {
      if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
          !TII->isInlineConstant(*OpToFold, OpInfo)) {
        // Otherwise check for another constant
        for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
          auto &Op = MI->getOperand(i);
          if (OpNo != i &&
              TII->isLiteralConstantLike(Op, OpInfo)) {
            return false;
          }
        }
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

// Find a def of the UseReg, check if it is a reg_sequence and find initializers
// for each subreg, tracking it to a foldable inline immediate if possible.
// Returns true on success.
static bool getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
    Register UseReg, uint8_t OpTy,
    const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
  MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());

    for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
         SubDef && Sub->isReg() && !Sub->getSubReg() &&
         TII->isFoldableCopy(*SubDef);
         SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
      MachineOperand *Op = &SubDef->getOperand(1);
      if (Op->isImm()) {
        if (TII->isInlineConstant(*Op, OpTy))
          Sub = Op;
        break;
      }
      if (!Op->isReg())
        break;
      Sub = Op;
    }

    Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
  }

  return true;
}

static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }) != FoldList.end())
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.
    MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
                           SOff->getReg() != MFI->getStackPtrOffsetReg()))
      return;

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    SOff->setReg(MFI->getStackPtrOffsetReg());
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();

    // Don't fold into a copy to a physical register. Doing so would interfere
    // with the register coalescer's logic which would avoid redundant
    // initializations.
    if (DestReg.isPhysical())
      return;

    const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);

    Register SrcReg = UseMI->getOperand(1).getReg();
    if (SrcReg.isVirtual()) { // XXX - This can be an assert?
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold
                        << "\n into " << *UseMI << '\n');
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);

      // It is very tricky to store a value into an AGPR. v_accvgpr_write_b32
      // can only accept VGPR or inline immediate. Recreate a reg_sequence with
      // its initializers right here, so we will rematerialize immediates and
      // avoid copies via different reg classes.
      SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
      if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
                        *MRI)) {
        const DebugLoc &DL = UseMI->getDebugLoc();
        MachineBasicBlock &MBB = *UseMI->getParent();

        UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
        for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
          UseMI->RemoveOperand(I);

        MachineInstrBuilder B(*MBB.getParent(), UseMI);
        DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
        SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
        for (unsigned I = 0; I < Size / 4; ++I) {
          MachineOperand *Def = Defs[I].first;
          TargetInstrInfo::RegSubRegPair CopyToVGPR;
          if (Def->isImm() &&
              TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
            int64_t Imm = Def->getImm();

            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
            B.addReg(Tmp);
          } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
            auto Src = getRegSubRegPair(*Def);
            Def->setIsKill(false);
            if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers, they
              // must be copied. Better do it here before copyPhysReg() creates
              // several reads to do the AGPR->VGPR->AGPR copy.
              CopyToVGPR = Src;
            } else {
              B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
                       Src.SubReg);
            }
          } else {
            assert(Def->isReg());
            Def->setIsKill(false);
            auto Src = getRegSubRegPair(*Def);

            // Direct copy from SGPR to AGPR is not possible. To avoid creation
            // of exploded copies SGPR->VGPR->AGPR in the copyPhysReg() later,
            // create a copy here and track if we already have such a copy.
            if (TRI->isSGPRReg(*MRI, Src.Reg)) {
              CopyToVGPR = Src;
            } else {
              auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
              B.addReg(Tmp);
            }
          }

          if (CopyToVGPR.Reg) {
            Register Vgpr;
            if (VGPRCopies.count(CopyToVGPR)) {
              Vgpr = VGPRCopies[CopyToVGPR];
            } else {
              Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
              VGPRCopies[CopyToVGPR] = Vgpr;
            }
            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
            B.addReg(Tmp);
          }

          B.addImm(Defs[I].second);
        }
        LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
        return;
      }

      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
           AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
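    // Gather the uses up front: foldOperand can add or remove uses of Dst,
    // which would invalidate a use_iterator while it is being walked.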
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    assert(!Fold.isReg() || Fold.OpToFold);
    if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
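// Counting uses per instruction (rather than per operand) lets the clamp
// pattern, where both sources are the same register, still count as a single
// use.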
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32Denormals) ||
        (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16Denormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32Denormals) ||
        (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16Denormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;

    MachineOperand *CurrentKnownM0Val = nullptr;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // Saw an unknown clobber of m0, so we no longer know what it is.
        if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
          CurrentKnownM0Val = nullptr;

        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);

        continue;
      }

      // Specially track simple redefs of m0 to the same value in a block, so we
      // can erase the later ones.
      if (MI.getOperand(0).getReg() == AMDGPU::M0) {
        MachineOperand &NewM0Val = MI.getOperand(1);
        if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
          MI.eraseFromParent();
          continue;
        }

        // We aren't tracking other physical registers
        CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
          nullptr : &NewM0Val;
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return true;
}