//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool isSafeToFold(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    // If there are additional implicit register operands, this may be used for
    // register indexing so the source register operand isn't simply copied.
    unsigned NumOps = MI.getDesc().getNumOperands() +
      MI.getDesc().getNumImplicitUses();

    return MI.getNumOperands() == NumOps;
  }
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef();
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only a full copy
    // may be folded there, since a subregister use tied to a full register
    // def doesn't really make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities.
    // The shrink operands pass already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  APInt Imm(TII->operandBitWidth(FoldDesc.OpInfo[1].OperandType),
            OpToFold.getImm());

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    assert(Imm.getBitWidth() == 64);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }
  }

  MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister)
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
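    // getImmOrMaterializedImm may have returned an operand of the defining
    // move, so update MI through Src0Idx rather than through the Src0 pointer.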
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse, NextInstUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (TII->isInlineConstant(*UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
    }
  }
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  MRI = &MF.getRegInfo();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI))
        continue;

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //   %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //   ...
      //   %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}