//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const SISubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
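// For example, an immediate folded into src2 of V_MAC_F32_e64/V_MAC_F16_e64
// must be checked against the corresponding V_MAD_F32/V_MAD_F16 operand,
// because the fold rewrites the mac into a mad (see tryAddToFoldList below).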
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      const MCInstrDesc &MadDesc
        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);

    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
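    // For example, an SGPR or literal that is illegal in src1 of a commutable
    // VOP2 instruction may become legal once swapped into src0, since the
    // VOP2 encoding only accepts a VGPR in src1.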
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only fold full
    // copies, since a subregister use tied to a full register def doesn't
    // really make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }


  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
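  // e.g. a %1 = COPY %0 whose source is known to be an immediate is rewritten
  // to the mov chosen by getMovOpcode for the destination register class
  // (V_MOV_B32_e32 or S_MOV_B32), so the immediate can later replace its
  // source operand when the fold list is applied.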
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }


  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);


  // Split 64-bit constants into 32-bit halves for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }



  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
    Desc.getNumImplicitUses() +
    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
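// The patterns handled below include fully-constant operands evaluated via
// evalBinaryInstruction (e.g. v_and_b32 k0, k1 -> v_mov_b32 (k0 & k1)) and
// identity/absorbing constants such as or x, 0 -> copy x and and x, 0 -> 0.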
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 =
      TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
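      // A single non-inline literal use is still folded (the
      // NumLiteralUses == 1 case below); the assumption is that replacing the
      // one use does not grow the program once the materializing mov becomes
      // dead.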
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 : 0;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
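// So "one use" here means one non-debug using instruction, not one use
// operand, which is what the helper below counts.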
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}