1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // Methods common to all machine instructions. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/CodeGen/MachineInstr.h" 15 #include "llvm/ADT/APFloat.h" 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/FoldingSet.h" 18 #include "llvm/ADT/Hashing.h" 19 #include "llvm/ADT/None.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SmallBitVector.h" 22 #include "llvm/ADT/SmallString.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/Analysis/AliasAnalysis.h" 25 #include "llvm/Analysis/Loads.h" 26 #include "llvm/Analysis/MemoryLocation.h" 27 #include "llvm/CodeGen/GlobalISel/RegisterBank.h" 28 #include "llvm/CodeGen/MachineBasicBlock.h" 29 #include "llvm/CodeGen/MachineFunction.h" 30 #include "llvm/CodeGen/MachineInstrBuilder.h" 31 #include "llvm/CodeGen/MachineInstrBundle.h" 32 #include "llvm/CodeGen/MachineMemOperand.h" 33 #include "llvm/CodeGen/MachineModuleInfo.h" 34 #include "llvm/CodeGen/MachineOperand.h" 35 #include "llvm/CodeGen/MachineRegisterInfo.h" 36 #include "llvm/CodeGen/PseudoSourceValue.h" 37 #include "llvm/CodeGen/TargetInstrInfo.h" 38 #include "llvm/CodeGen/TargetRegisterInfo.h" 39 #include "llvm/CodeGen/TargetSubtargetInfo.h" 40 #include "llvm/IR/Constants.h" 41 #include "llvm/IR/DebugInfoMetadata.h" 42 #include "llvm/IR/DebugLoc.h" 43 #include "llvm/IR/DerivedTypes.h" 44 #include "llvm/IR/Function.h" 45 #include "llvm/IR/InlineAsm.h" 46 #include "llvm/IR/InstrTypes.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/LLVMContext.h" 49 #include "llvm/IR/Metadata.h" 50 #include "llvm/IR/Module.h" 51 #include 
"llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <utility>

using namespace llvm;

/// Append the implicit defs and uses declared by this instruction's
/// MCInstrDesc as implicit register operands. Implicit defs are added first,
/// then implicit uses, matching the operand reservation done in the ctor.
void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
  if (MCID->ImplicitDefs)
    for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
         ++ImpDefs)
      addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
  if (MCID->ImplicitUses)
    for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
         ++ImpUses)
      addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
}

/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc. When \p NoImp is true, the implicit def/use operands
/// declared by the descriptor are deliberately not added.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
                           DebugLoc dl, bool NoImp)
    : MCID(&tid), debugLoc(std::move(dl)) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands: explicit operands plus
  // the implicit defs/uses the descriptor will make us add below.
  if (unsigned NumOps = MCID->getNumOperands() +
      MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  if (!NoImp)
    addImplicitDefUseOperands(MF);
}

/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Note: the memory-operand array pointer (MemRefs) is shared with \p MI, not
/// deep-copied; the operands themselves are re-added one by one via
/// addOperand so this instruction gets its own operand storage.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
      debugLoc(MI.getDebugLoc()) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}

/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
  if (MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.removeRegOperandFromUseList(&MO);
}

/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction to their respective use lists. This requires that the
/// operands not be on their use lists yet.
void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.addRegOperandToUseList(&MO);
}

/// Convenience overload for instructions already inserted into a basic block:
/// looks up the owning MachineFunction and forwards to addOperand(MF, Op).
void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  addOperand(*MF, Op);
}

/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);

  // MachineOperand is a trivially copyable type so we can just use memmove.
  std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}

/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand. Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around. See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk backwards over the trailing run of implicit register operands;
    // the new explicit operand is inserted in front of them.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

#ifndef NDEBUG
  bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
  // OpNo now points at the desired insertion point. Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
          OpNo < MCID->getNumOperands() || isMetaDataOp) &&
         "Trying to add an operand to a machine instr that is already done!");
#endif

  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    // Grow (or do the initial allocation when OldOperands is null).
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
  }
}

/// RemoveOperand - Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Shift the tail of the operand array down over the erased slot.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}

/// addMemOperand - Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void MachineInstr::addMemOperand(MachineFunction &MF,
                                 MachineMemOperand *MO) {
  mmo_iterator OldMemRefs = MemRefs;
  unsigned OldNumMemRefs = NumMemRefs;

  // Allocate a fresh array one entry larger and append MO at the end.
  unsigned NewNum = NumMemRefs + 1;
  mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);

  std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
  NewMemRefs[NewNum - 1] = MO;
  setMemRefs(NewMemRefs, NewMemRefs + NewNum);
}

/// Check to see if the MMOs pointed to by the two MemRefs arrays are
/// identical.
static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) {
  auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end();
  auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end();
  // Different lengths can never be identical.
  if ((E1 - I1) != (E2 - I2))
    return false;
  for (; I1 != E1; ++I1, ++I2) {
    if (**I1 != **I2)
      return false;
  }
  return true;
}

std::pair<MachineInstr::mmo_iterator, unsigned>
MachineInstr::mergeMemRefsWith(const MachineInstr& Other) {

  // If either of the incoming memrefs are empty, we must be conservative and
  // treat this as if we've exhausted our space for memrefs and dropped them.
  if (memoperands_empty() || Other.memoperands_empty())
    return std::make_pair(nullptr, 0);

  // If both instructions have identical memrefs, we don't need to merge them.
  // Since many instructions have a single memref, and we tend to merge things
  // like pairs of loads from the same location, this catches a large number of
  // cases in practice.
  if (hasIdenticalMMOs(*this, Other))
    return std::make_pair(MemRefs, NumMemRefs);

  // TODO: consider uniquing elements within the operand lists to reduce
  // space usage and fall back to conservative information less often.
  size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs;

  // If we don't have enough room to store this many memrefs, be conservative
  // and drop them. Otherwise, we'd fail asserts when trying to add them to
  // the new instruction.
  // (NumMemRefs is stored in 8 bits, hence the round-trip through uint8_t.)
  if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs))
    return std::make_pair(nullptr, 0);

  MachineFunction *MF = getMF();
  mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs);
  mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(),
                                  MemBegin);
  MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(),
                     MemEnd);
  assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs &&
         "missing memrefs");

  return std::make_pair(MemBegin, CombinedNumMemRefs);
}

/// Walk the bundle headed by this instruction and check whether the MCID
/// flags in \p Mask are set on any member (AnyInBundle) or on all non-header
/// members (AllInBundle).
bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      // The BUNDLE header itself is exempt from the AllInBundle requirement.
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}

bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we analyzed the last instruction inside at least one of the
    // bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        // Only virtual-register defs on both sides may be ignored.
        if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
            !TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two dbg.values are not identical.
  if (isDebugValue())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  return true;
}

/// Return the MachineFunction containing this instruction.
const MachineFunction *MachineInstr::getMF() const {
  return getParent()->getParent();
}

/// Unlink this instruction from its basic block without deleting it.
MachineInstr *MachineInstr::removeFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove(this);
}

/// Unlink this instruction (bundle-aware variant) without deleting it.
MachineInstr *MachineInstr::removeFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove_instr(this);
}

/// Unlink and delete this instruction.
void MachineInstr::eraseFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase(this);
}

/// Erase this instruction, first turning any DBG_VALUE that uses one of its
/// virtual-register defs into an "undef" debug value so no dangling debug
/// info is left behind.
void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
  assert(getParent() && "Not embedded in a basic block!");
  MachineBasicBlock *MBB = getParent();
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Not embedded in a function!");

  MachineInstr *MI = (MachineInstr *)this;
  MachineRegisterInfo &MRI = MF->getRegInfo();

  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    // Only virtual registers have tracked debug uses here.
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    MRI.markUsesInDebugValueAsUndef(Reg);
  }
  MI->eraseFromParent();
}

/// Unlink and delete this instruction (bundle-aware variant).
void MachineInstr::eraseFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase_instr(this);
}

/// getNumExplicitOperands - Returns the number of non-implicit operands.
493 /// 494 unsigned MachineInstr::getNumExplicitOperands() const { 495 unsigned NumOperands = MCID->getNumOperands(); 496 if (!MCID->isVariadic()) 497 return NumOperands; 498 499 for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) { 500 const MachineOperand &MO = getOperand(i); 501 if (!MO.isReg() || !MO.isImplicit()) 502 NumOperands++; 503 } 504 return NumOperands; 505 } 506 507 void MachineInstr::bundleWithPred() { 508 assert(!isBundledWithPred() && "MI is already bundled with its predecessor"); 509 setFlag(BundledPred); 510 MachineBasicBlock::instr_iterator Pred = getIterator(); 511 --Pred; 512 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags"); 513 Pred->setFlag(BundledSucc); 514 } 515 516 void MachineInstr::bundleWithSucc() { 517 assert(!isBundledWithSucc() && "MI is already bundled with its successor"); 518 setFlag(BundledSucc); 519 MachineBasicBlock::instr_iterator Succ = getIterator(); 520 ++Succ; 521 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags"); 522 Succ->setFlag(BundledPred); 523 } 524 525 void MachineInstr::unbundleFromPred() { 526 assert(isBundledWithPred() && "MI isn't bundled with its predecessor"); 527 clearFlag(BundledPred); 528 MachineBasicBlock::instr_iterator Pred = getIterator(); 529 --Pred; 530 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags"); 531 Pred->clearFlag(BundledSucc); 532 } 533 534 void MachineInstr::unbundleFromSucc() { 535 assert(isBundledWithSucc() && "MI isn't bundled with its successor"); 536 clearFlag(BundledSucc); 537 MachineBasicBlock::instr_iterator Succ = getIterator(); 538 ++Succ; 539 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags"); 540 Succ->clearFlag(BundledPred); 541 } 542 543 bool MachineInstr::isStackAligningInlineAsm() const { 544 if (isInlineAsm()) { 545 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); 546 if (ExtraInfo & InlineAsm::Extra_IsAlignStack) 547 return true; 548 } 549 return false; 550 } 551 552 
InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}

/// Find the index of the flag (group descriptor) operand that covers inline
/// asm operand \p OpIdx. If \p GroupNo is non-null, it is set to the ordinal
/// of the operand group. Returns -1 for the fixed leading operands and for
/// the trailing implicit register operands.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    // Each group is the flag operand plus its registers.
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}

/// The variable metadata of a DBG_VALUE lives in operand 2.
const DILocalVariable *MachineInstr::getDebugVariable() const {
  assert(isDebugValue() && "not a DBG_VALUE");
  return cast<DILocalVariable>(getOperand(2).getMetadata());
}

/// The complex expression of a DBG_VALUE lives in operand 3.
const DIExpression *MachineInstr::getDebugExpression() const {
  assert(isDebugValue() && "not a DBG_VALUE");
  return cast<DIExpression>(getOperand(3).getMetadata());
}

/// Compute the register class constraint imposed on operand \p OpIdx, either
/// from the MCInstrDesc or, for inline asm, from the operand-group flag word.
/// Returns null when no constraint applies.
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have
  // been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
    unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  assert(CurRC && "Invalid initial register class");
  // Check if Reg is constrained by some of its use/def from MI.
  const MachineOperand &MO = getOperand(OpIdx);
  if (!MO.isReg() || MO.getReg() != Reg)
    return CurRC;
  // If yes, accumulate the constraints through the operand.
  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    // A sub-register use further constrains the class: the register must
    // have the sub-register and, if the operand is itself constrained,
    // match OpRC through that sub-register index.
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}

/// Return the number of instructions inside the MI bundle, not counting the
/// header instruction.
unsigned MachineInstr::getBundleSize() const {
  MachineBasicBlock::const_instr_iterator I = getIterator();
  unsigned Size = 0;
  // Count the instructions chained to this header via BundledSucc flags.
  while (I->isBundledWithSucc()) {
    ++Size;
    ++I;
  }
  return Size;
}

/// Returns true if the MachineInstr has an implicit-use operand of exactly
/// the given register (not considering sub/super-registers).
bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
      return true;
  }
  return false;
}

/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
/// the specific register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
int MachineInstr::findRegisterUseOperandIdx(
    unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;
    // An exact match, or (with TRI) a physical super-register of Reg, counts
    // as a use of Reg.
    if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
                         TargetRegisterInfo::isPhysicalRegister(Reg) &&
                         TRI->isSubRegister(MOReg, Reg)))
      if (!isKill || MO.isKill())
        return i;
  }
  return -1;
}

/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(unsigned Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}

/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    if (!Found && TRI && isPhys &&
        TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // With Overlap, any aliasing def matches; otherwise only a def of a
      // strict super-register of Reg does.
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}

/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate.
/// It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
  // Don't call MCID.findFirstPredOperandIdx() because this variant
  // is sometimes called on an instruction that's not yet complete, and
  // so the number of operands is less than the MCID indicates. In
  // particular, the PTX target does this.
  const MCInstrDesc &MCID = getDesc();
  if (MCID.isPredicable()) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (MCID.OpInfo[i].isPredicate())
        return i;
  }

  return -1;
}

// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0: Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax: Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands, but on
    // normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert(isInlineAsm() && "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}

/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}

/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
  for (MachineOperand &MO : operands()) {
    if (MO.isReg() && MO.isUse())
      MO.setIsKill(false);
  }
}

/// Rewrite every operand referring to FromReg so it refers to ToReg instead,
/// composing SubIdx into the operand's existing sub-register index. Physical
/// and virtual destinations take different substitution paths.
void MachineInstr::substituteRegister(unsigned FromReg,
                                      unsigned ToReg,
                                      unsigned SubIdx,
                                      const TargetRegisterInfo &RegInfo) {
  if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
    // For a physical destination, fold SubIdx into the register itself.
    if (SubIdx)
      ToReg = RegInfo.getSubReg(ToReg, SubIdx);
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substPhysReg(ToReg, RegInfo);
    }
  } else {
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substVirtReg(ToReg, SubIdx, RegInfo);
    }
  }
}

/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads.
It is not allowed to move 937 // a load across an atomic load with Ordering > Monotonic. 938 if (mayStore() || isCall() || isPHI() || 939 (mayLoad() && hasOrderedMemoryRef())) { 940 SawStore = true; 941 return false; 942 } 943 944 if (isPosition() || isDebugValue() || isTerminator() || 945 hasUnmodeledSideEffects()) 946 return false; 947 948 // See if this instruction does a load. If so, we have to guarantee that the 949 // loaded value doesn't change between the load and the its intended 950 // destination. The check for isInvariantLoad gives the targe the chance to 951 // classify the load as always returning a constant, e.g. a constant pool 952 // load. 953 if (mayLoad() && !isDereferenceableInvariantLoad(AA)) 954 // Otherwise, this is a real load. If there is a store between the load and 955 // end of block, we can't move it. 956 return !SawStore; 957 958 return true; 959 } 960 961 bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other, 962 bool UseTBAA) { 963 const MachineFunction *MF = getMF(); 964 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 965 const MachineFrameInfo &MFI = MF->getFrameInfo(); 966 967 // If neither instruction stores to memory, they can't alias in any 968 // meaningful way, even if they read from the same address. 969 if (!mayStore() && !Other.mayStore()) 970 return false; 971 972 // Let the target decide if memory accesses cannot possibly overlap. 973 if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA)) 974 return false; 975 976 // FIXME: Need to handle multiple memory operands to support all targets. 
977 if (!hasOneMemOperand() || !Other.hasOneMemOperand()) 978 return true; 979 980 MachineMemOperand *MMOa = *memoperands_begin(); 981 MachineMemOperand *MMOb = *Other.memoperands_begin(); 982 983 // The following interface to AA is fashioned after DAGCombiner::isAlias 984 // and operates with MachineMemOperand offset with some important 985 // assumptions: 986 // - LLVM fundamentally assumes flat address spaces. 987 // - MachineOperand offset can *only* result from legalization and 988 // cannot affect queries other than the trivial case of overlap 989 // checking. 990 // - These offsets never wrap and never step outside 991 // of allocated objects. 992 // - There should never be any negative offsets here. 993 // 994 // FIXME: Modify API to hide this math from "user" 995 // Even before we go to AA we can reason locally about some 996 // memory objects. It can save compile time, and possibly catch some 997 // corner cases not currently covered. 998 999 int64_t OffsetA = MMOa->getOffset(); 1000 int64_t OffsetB = MMOb->getOffset(); 1001 1002 int64_t MinOffset = std::min(OffsetA, OffsetB); 1003 int64_t WidthA = MMOa->getSize(); 1004 int64_t WidthB = MMOb->getSize(); 1005 const Value *ValA = MMOa->getValue(); 1006 const Value *ValB = MMOb->getValue(); 1007 bool SameVal = (ValA && ValB && (ValA == ValB)); 1008 if (!SameVal) { 1009 const PseudoSourceValue *PSVa = MMOa->getPseudoValue(); 1010 const PseudoSourceValue *PSVb = MMOb->getPseudoValue(); 1011 if (PSVa && ValB && !PSVa->mayAlias(&MFI)) 1012 return false; 1013 if (PSVb && ValA && !PSVb->mayAlias(&MFI)) 1014 return false; 1015 if (PSVa && PSVb && (PSVa == PSVb)) 1016 SameVal = true; 1017 } 1018 1019 if (SameVal) { 1020 int64_t MaxOffset = std::max(OffsetA, OffsetB); 1021 int64_t LowWidth = (MinOffset == OffsetA) ? 
WidthA : WidthB; 1022 return (MinOffset + LowWidth > MaxOffset); 1023 } 1024 1025 if (!AA) 1026 return true; 1027 1028 if (!ValA || !ValB) 1029 return true; 1030 1031 assert((OffsetA >= 0) && "Negative MachineMemOperand offset"); 1032 assert((OffsetB >= 0) && "Negative MachineMemOperand offset"); 1033 1034 int64_t Overlapa = WidthA + OffsetA - MinOffset; 1035 int64_t Overlapb = WidthB + OffsetB - MinOffset; 1036 1037 AliasResult AAResult = AA->alias( 1038 MemoryLocation(ValA, Overlapa, 1039 UseTBAA ? MMOa->getAAInfo() : AAMDNodes()), 1040 MemoryLocation(ValB, Overlapb, 1041 UseTBAA ? MMOb->getAAInfo() : AAMDNodes())); 1042 1043 return (AAResult != NoAlias); 1044 } 1045 1046 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered 1047 /// or volatile memory reference, or if the information describing the memory 1048 /// reference is not available. Return false if it is known to have no ordered 1049 /// memory references. 1050 bool MachineInstr::hasOrderedMemoryRef() const { 1051 // An instruction known never to access memory won't have a volatile access. 1052 if (!mayStore() && 1053 !mayLoad() && 1054 !isCall() && 1055 !hasUnmodeledSideEffects()) 1056 return false; 1057 1058 // Otherwise, if the instruction has no memory reference information, 1059 // conservatively assume it wasn't preserved. 1060 if (memoperands_empty()) 1061 return true; 1062 1063 // Check if any of our memory operands are ordered. 1064 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) { 1065 return !MMO->isUnordered(); 1066 }); 1067 } 1068 1069 /// isDereferenceableInvariantLoad - Return true if this instruction will never 1070 /// trap and is loading from a location whose value is invariant across a run of 1071 /// this function. 1072 bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const { 1073 // If the instruction doesn't load at all, it isn't an invariant load. 
1074 if (!mayLoad()) 1075 return false; 1076 1077 // If the instruction has lost its memoperands, conservatively assume that 1078 // it may not be an invariant load. 1079 if (memoperands_empty()) 1080 return false; 1081 1082 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo(); 1083 1084 for (MachineMemOperand *MMO : memoperands()) { 1085 if (MMO->isVolatile()) return false; 1086 if (MMO->isStore()) return false; 1087 if (MMO->isInvariant() && MMO->isDereferenceable()) 1088 continue; 1089 1090 // A load from a constant PseudoSourceValue is invariant. 1091 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) 1092 if (PSV->isConstant(&MFI)) 1093 continue; 1094 1095 if (const Value *V = MMO->getValue()) { 1096 // If we have an AliasAnalysis, ask it whether the memory is constant. 1097 if (AA && 1098 AA->pointsToConstantMemory( 1099 MemoryLocation(V, MMO->getSize(), MMO->getAAInfo()))) 1100 continue; 1101 } 1102 1103 // Otherwise assume conservatively. 1104 return false; 1105 } 1106 1107 // Everything checks out. 1108 return true; 1109 } 1110 1111 /// isConstantValuePHI - If the specified instruction is a PHI that always 1112 /// merges together the same virtual register, return the register, otherwise 1113 /// return 0. 
1114 unsigned MachineInstr::isConstantValuePHI() const { 1115 if (!isPHI()) 1116 return 0; 1117 assert(getNumOperands() >= 3 && 1118 "It's illegal to have a PHI without source operands"); 1119 1120 unsigned Reg = getOperand(1).getReg(); 1121 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2) 1122 if (getOperand(i).getReg() != Reg) 1123 return 0; 1124 return Reg; 1125 } 1126 1127 bool MachineInstr::hasUnmodeledSideEffects() const { 1128 if (hasProperty(MCID::UnmodeledSideEffects)) 1129 return true; 1130 if (isInlineAsm()) { 1131 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); 1132 if (ExtraInfo & InlineAsm::Extra_HasSideEffects) 1133 return true; 1134 } 1135 1136 return false; 1137 } 1138 1139 bool MachineInstr::isLoadFoldBarrier() const { 1140 return mayStore() || isCall() || hasUnmodeledSideEffects(); 1141 } 1142 1143 /// allDefsAreDead - Return true if all the defs of this instruction are dead. 1144 /// 1145 bool MachineInstr::allDefsAreDead() const { 1146 for (const MachineOperand &MO : operands()) { 1147 if (!MO.isReg() || MO.isUse()) 1148 continue; 1149 if (!MO.isDead()) 1150 return false; 1151 } 1152 return true; 1153 } 1154 1155 /// copyImplicitOps - Copy implicit register operands from specified 1156 /// instruction to this instruction. 1157 void MachineInstr::copyImplicitOps(MachineFunction &MF, 1158 const MachineInstr &MI) { 1159 for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands(); 1160 i != e; ++i) { 1161 const MachineOperand &MO = MI.getOperand(i); 1162 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask()) 1163 addOperand(MF, MO); 1164 } 1165 } 1166 1167 bool MachineInstr::hasComplexRegisterTies() const { 1168 const MCInstrDesc &MCID = getDesc(); 1169 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 1170 const auto &Operand = getOperand(I); 1171 if (!Operand.isReg() || Operand.isDef()) 1172 // Ignore the defined registers as MCID marks only the uses as tied. 
1173 continue; 1174 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO); 1175 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1; 1176 if (ExpectedTiedIdx != TiedIdx) 1177 return true; 1178 } 1179 return false; 1180 } 1181 1182 LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, 1183 const MachineRegisterInfo &MRI) const { 1184 const MachineOperand &Op = getOperand(OpIdx); 1185 if (!Op.isReg()) 1186 return LLT{}; 1187 1188 if (isVariadic() || OpIdx >= getNumExplicitOperands()) 1189 return MRI.getType(Op.getReg()); 1190 1191 auto &OpInfo = getDesc().OpInfo[OpIdx]; 1192 if (!OpInfo.isGenericType()) 1193 return MRI.getType(Op.getReg()); 1194 1195 if (PrintedTypes[OpInfo.getGenericTypeIndex()]) 1196 return LLT{}; 1197 1198 PrintedTypes.set(OpInfo.getGenericTypeIndex()); 1199 return MRI.getType(Op.getReg()); 1200 } 1201 1202 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1203 LLVM_DUMP_METHOD void MachineInstr::dump() const { 1204 dbgs() << " "; 1205 print(dbgs()); 1206 } 1207 #endif 1208 1209 void MachineInstr::print(raw_ostream &OS, bool SkipOpers, bool SkipDebugLoc, 1210 const TargetInstrInfo *TII) const { 1211 const Module *M = nullptr; 1212 if (const MachineBasicBlock *MBB = getParent()) 1213 if (const MachineFunction *MF = MBB->getParent()) 1214 M = MF->getFunction().getParent(); 1215 1216 ModuleSlotTracker MST(M); 1217 print(OS, MST, SkipOpers, SkipDebugLoc, TII); 1218 } 1219 1220 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST, 1221 bool SkipOpers, bool SkipDebugLoc, 1222 const TargetInstrInfo *TII) const { 1223 // We can be a bit tidier if we know the MachineFunction. 
1224 const MachineFunction *MF = nullptr; 1225 const TargetRegisterInfo *TRI = nullptr; 1226 const MachineRegisterInfo *MRI = nullptr; 1227 const TargetIntrinsicInfo *IntrinsicInfo = nullptr; 1228 1229 if (const MachineBasicBlock *MBB = getParent()) { 1230 MF = MBB->getParent(); 1231 if (MF) { 1232 MRI = &MF->getRegInfo(); 1233 TRI = MF->getSubtarget().getRegisterInfo(); 1234 if (!TII) 1235 TII = MF->getSubtarget().getInstrInfo(); 1236 IntrinsicInfo = MF->getTarget().getIntrinsicInfo(); 1237 } 1238 } 1239 1240 // Save a list of virtual registers. 1241 SmallVector<unsigned, 8> VirtRegs; 1242 1243 SmallBitVector PrintedTypes(8); 1244 bool ShouldPrintRegisterTies = hasComplexRegisterTies(); 1245 auto getTiedOperandIdx = [&](unsigned OpIdx) { 1246 if (!ShouldPrintRegisterTies) 1247 return 0U; 1248 const MachineOperand &MO = getOperand(OpIdx); 1249 if (MO.isReg() && MO.isTied() && !MO.isDef()) 1250 return findTiedOperandIdx(OpIdx); 1251 return 0U; 1252 }; 1253 // Print explicitly defined operands on the left of an assignment syntax. 1254 unsigned StartOp = 0, e = getNumOperands(); 1255 for (; StartOp < e && getOperand(StartOp).isReg() && 1256 getOperand(StartOp).isDef() && !getOperand(StartOp).isImplicit(); 1257 ++StartOp) { 1258 if (StartOp != 0) 1259 OS << ", "; 1260 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{}; 1261 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp); 1262 getOperand(StartOp).print(OS, MST, TypeToPrint, /*PrintDef=*/false, 1263 ShouldPrintRegisterTies, TiedOperandIdx, TRI, 1264 IntrinsicInfo); 1265 unsigned Reg = getOperand(StartOp).getReg(); 1266 if (TargetRegisterInfo::isVirtualRegister(Reg)) 1267 VirtRegs.push_back(Reg); 1268 } 1269 1270 if (StartOp != 0) 1271 OS << " = "; 1272 1273 // Print the opcode name. 1274 if (TII) 1275 OS << TII->getName(getOpcode()); 1276 else 1277 OS << "UNKNOWN"; 1278 1279 if (SkipOpers) 1280 return; 1281 1282 // Print the rest of the operands. 
1283 bool FirstOp = true; 1284 unsigned AsmDescOp = ~0u; 1285 unsigned AsmOpCount = 0; 1286 1287 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) { 1288 // Print asm string. 1289 OS << " "; 1290 const unsigned OpIdx = InlineAsm::MIOp_AsmString; 1291 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{}; 1292 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx); 1293 getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, 1294 ShouldPrintRegisterTies, TiedOperandIdx, TRI, 1295 IntrinsicInfo); 1296 1297 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack 1298 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); 1299 if (ExtraInfo & InlineAsm::Extra_HasSideEffects) 1300 OS << " [sideeffect]"; 1301 if (ExtraInfo & InlineAsm::Extra_MayLoad) 1302 OS << " [mayload]"; 1303 if (ExtraInfo & InlineAsm::Extra_MayStore) 1304 OS << " [maystore]"; 1305 if (ExtraInfo & InlineAsm::Extra_IsConvergent) 1306 OS << " [isconvergent]"; 1307 if (ExtraInfo & InlineAsm::Extra_IsAlignStack) 1308 OS << " [alignstack]"; 1309 if (getInlineAsmDialect() == InlineAsm::AD_ATT) 1310 OS << " [attdialect]"; 1311 if (getInlineAsmDialect() == InlineAsm::AD_Intel) 1312 OS << " [inteldialect]"; 1313 1314 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand; 1315 FirstOp = false; 1316 } 1317 1318 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) { 1319 const MachineOperand &MO = getOperand(i); 1320 1321 if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) 1322 VirtRegs.push_back(MO.getReg()); 1323 1324 if (FirstOp) FirstOp = false; else OS << ","; 1325 OS << " "; 1326 if (i < getDesc().NumOperands) { 1327 const MCOperandInfo &MCOI = getDesc().OpInfo[i]; 1328 if (MCOI.isPredicate()) 1329 OS << "pred:"; 1330 if (MCOI.isOptionalDef()) 1331 OS << "opt:"; 1332 } 1333 if (isDebugValue() && MO.isMetadata()) { 1334 // Pretty print DBG_VALUE instructions. 
1335 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata()); 1336 if (DIV && !DIV->getName().empty()) 1337 OS << "!\"" << DIV->getName() << '\"'; 1338 else { 1339 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{}; 1340 unsigned TiedOperandIdx = getTiedOperandIdx(i); 1341 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, 1342 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo); 1343 } 1344 } else if (TRI && (isInsertSubreg() || isRegSequence() || 1345 (isSubregToReg() && i == 3)) && MO.isImm()) { 1346 OS << TRI->getSubRegIndexName(MO.getImm()); 1347 } else if (i == AsmDescOp && MO.isImm()) { 1348 // Pretty print the inline asm operand descriptor. 1349 OS << '$' << AsmOpCount++; 1350 unsigned Flag = MO.getImm(); 1351 switch (InlineAsm::getKind(Flag)) { 1352 case InlineAsm::Kind_RegUse: OS << ":[reguse"; break; 1353 case InlineAsm::Kind_RegDef: OS << ":[regdef"; break; 1354 case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break; 1355 case InlineAsm::Kind_Clobber: OS << ":[clobber"; break; 1356 case InlineAsm::Kind_Imm: OS << ":[imm"; break; 1357 case InlineAsm::Kind_Mem: OS << ":[mem"; break; 1358 default: OS << ":[??" 
<< InlineAsm::getKind(Flag); break; 1359 } 1360 1361 unsigned RCID = 0; 1362 if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) && 1363 InlineAsm::hasRegClassConstraint(Flag, RCID)) { 1364 if (TRI) { 1365 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID)); 1366 } else 1367 OS << ":RC" << RCID; 1368 } 1369 1370 if (InlineAsm::isMemKind(Flag)) { 1371 unsigned MCID = InlineAsm::getMemoryConstraintID(Flag); 1372 switch (MCID) { 1373 case InlineAsm::Constraint_es: OS << ":es"; break; 1374 case InlineAsm::Constraint_i: OS << ":i"; break; 1375 case InlineAsm::Constraint_m: OS << ":m"; break; 1376 case InlineAsm::Constraint_o: OS << ":o"; break; 1377 case InlineAsm::Constraint_v: OS << ":v"; break; 1378 case InlineAsm::Constraint_Q: OS << ":Q"; break; 1379 case InlineAsm::Constraint_R: OS << ":R"; break; 1380 case InlineAsm::Constraint_S: OS << ":S"; break; 1381 case InlineAsm::Constraint_T: OS << ":T"; break; 1382 case InlineAsm::Constraint_Um: OS << ":Um"; break; 1383 case InlineAsm::Constraint_Un: OS << ":Un"; break; 1384 case InlineAsm::Constraint_Uq: OS << ":Uq"; break; 1385 case InlineAsm::Constraint_Us: OS << ":Us"; break; 1386 case InlineAsm::Constraint_Ut: OS << ":Ut"; break; 1387 case InlineAsm::Constraint_Uv: OS << ":Uv"; break; 1388 case InlineAsm::Constraint_Uy: OS << ":Uy"; break; 1389 case InlineAsm::Constraint_X: OS << ":X"; break; 1390 case InlineAsm::Constraint_Z: OS << ":Z"; break; 1391 case InlineAsm::Constraint_ZC: OS << ":ZC"; break; 1392 case InlineAsm::Constraint_Zy: OS << ":Zy"; break; 1393 default: OS << ":?"; break; 1394 } 1395 } 1396 1397 unsigned TiedTo = 0; 1398 if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo)) 1399 OS << " tiedto:$" << TiedTo; 1400 1401 OS << ']'; 1402 1403 // Compute the index of the next operand descriptor. 1404 AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag); 1405 } else { 1406 LLT TypeToPrint = MRI ? 
getTypeToPrint(i, PrintedTypes, *MRI) : LLT{}; 1407 unsigned TiedOperandIdx = getTiedOperandIdx(i); 1408 if (MO.isImm() && isOperandSubregIdx(i)) 1409 MachineOperand::printSubregIdx(OS, MO.getImm(), TRI); 1410 else 1411 MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, 1412 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo); 1413 } 1414 } 1415 1416 bool HaveSemi = false; 1417 const unsigned PrintableFlags = FrameSetup | FrameDestroy; 1418 if (Flags & PrintableFlags) { 1419 if (!HaveSemi) { 1420 OS << ";"; 1421 HaveSemi = true; 1422 } 1423 OS << " flags: "; 1424 1425 if (Flags & FrameSetup) 1426 OS << "FrameSetup"; 1427 1428 if (Flags & FrameDestroy) 1429 OS << "FrameDestroy"; 1430 } 1431 1432 if (!memoperands_empty()) { 1433 if (!HaveSemi) { 1434 OS << ";"; 1435 HaveSemi = true; 1436 } 1437 1438 OS << " mem:"; 1439 for (mmo_iterator i = memoperands_begin(), e = memoperands_end(); 1440 i != e; ++i) { 1441 (*i)->print(OS, MST); 1442 if (std::next(i) != e) 1443 OS << " "; 1444 } 1445 } 1446 1447 // Print the regclass of any virtual registers encountered. 1448 if (MRI && !VirtRegs.empty()) { 1449 if (!HaveSemi) { 1450 OS << ";"; 1451 HaveSemi = true; 1452 } 1453 for (unsigned i = 0; i != VirtRegs.size(); ++i) { 1454 const RegClassOrRegBank &RC = MRI->getRegClassOrRegBank(VirtRegs[i]); 1455 if (!RC) 1456 continue; 1457 // Generic virtual registers do not have register classes. 1458 if (RC.is<const RegisterBank *>()) 1459 OS << " " << RC.get<const RegisterBank *>()->getName(); 1460 else 1461 OS << " " 1462 << TRI->getRegClassName(RC.get<const TargetRegisterClass *>()); 1463 OS << ':' << printReg(VirtRegs[i]); 1464 for (unsigned j = i+1; j != VirtRegs.size();) { 1465 if (MRI->getRegClassOrRegBank(VirtRegs[j]) != RC) { 1466 ++j; 1467 continue; 1468 } 1469 if (VirtRegs[i] != VirtRegs[j]) 1470 OS << "," << printReg(VirtRegs[j]); 1471 VirtRegs.erase(VirtRegs.begin()+j); 1472 } 1473 } 1474 } 1475 1476 // Print debug location information. 
1477 if (isDebugValue() && getOperand(e - 2).isMetadata()) { 1478 if (!HaveSemi) 1479 OS << ";"; 1480 auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata()); 1481 OS << " line no:" << DV->getLine(); 1482 if (auto *InlinedAt = debugLoc->getInlinedAt()) { 1483 DebugLoc InlinedAtDL(InlinedAt); 1484 if (InlinedAtDL && MF) { 1485 OS << " inlined @[ "; 1486 InlinedAtDL.print(OS); 1487 OS << " ]"; 1488 } 1489 } 1490 if (isIndirectDebugValue()) 1491 OS << " indirect"; 1492 } else if (SkipDebugLoc) { 1493 return; 1494 } else if (debugLoc && MF) { 1495 if (!HaveSemi) 1496 OS << ";"; 1497 OS << " dbg:"; 1498 debugLoc.print(OS); 1499 } 1500 1501 OS << '\n'; 1502 } 1503 1504 bool MachineInstr::addRegisterKilled(unsigned IncomingReg, 1505 const TargetRegisterInfo *RegInfo, 1506 bool AddIfNotFound) { 1507 bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg); 1508 bool hasAliases = isPhysReg && 1509 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid(); 1510 bool Found = false; 1511 SmallVector<unsigned,4> DeadOps; 1512 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 1513 MachineOperand &MO = getOperand(i); 1514 if (!MO.isReg() || !MO.isUse() || MO.isUndef()) 1515 continue; 1516 1517 // DEBUG_VALUE nodes do not contribute to code generation and should 1518 // always be ignored. Failure to do so may result in trying to modify 1519 // KILL flags on DEBUG_VALUE nodes. 1520 if (MO.isDebug()) 1521 continue; 1522 1523 unsigned Reg = MO.getReg(); 1524 if (!Reg) 1525 continue; 1526 1527 if (Reg == IncomingReg) { 1528 if (!Found) { 1529 if (MO.isKill()) 1530 // The register is already marked kill. 1531 return true; 1532 if (isPhysReg && isRegTiedToDefOperand(i)) 1533 // Two-address uses of physregs must not be marked kill. 1534 return true; 1535 MO.setIsKill(); 1536 Found = true; 1537 } 1538 } else if (hasAliases && MO.isKill() && 1539 TargetRegisterInfo::isPhysicalRegister(Reg)) { 1540 // A super-register kill already exists. 
1541 if (RegInfo->isSuperRegister(IncomingReg, Reg)) 1542 return true; 1543 if (RegInfo->isSubRegister(IncomingReg, Reg)) 1544 DeadOps.push_back(i); 1545 } 1546 } 1547 1548 // Trim unneeded kill operands. 1549 while (!DeadOps.empty()) { 1550 unsigned OpIdx = DeadOps.back(); 1551 if (getOperand(OpIdx).isImplicit()) 1552 RemoveOperand(OpIdx); 1553 else 1554 getOperand(OpIdx).setIsKill(false); 1555 DeadOps.pop_back(); 1556 } 1557 1558 // If not found, this means an alias of one of the operands is killed. Add a 1559 // new implicit operand if required. 1560 if (!Found && AddIfNotFound) { 1561 addOperand(MachineOperand::CreateReg(IncomingReg, 1562 false /*IsDef*/, 1563 true /*IsImp*/, 1564 true /*IsKill*/)); 1565 return true; 1566 } 1567 return Found; 1568 } 1569 1570 void MachineInstr::clearRegisterKills(unsigned Reg, 1571 const TargetRegisterInfo *RegInfo) { 1572 if (!TargetRegisterInfo::isPhysicalRegister(Reg)) 1573 RegInfo = nullptr; 1574 for (MachineOperand &MO : operands()) { 1575 if (!MO.isReg() || !MO.isUse() || !MO.isKill()) 1576 continue; 1577 unsigned OpReg = MO.getReg(); 1578 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg) 1579 MO.setIsKill(false); 1580 } 1581 } 1582 1583 bool MachineInstr::addRegisterDead(unsigned Reg, 1584 const TargetRegisterInfo *RegInfo, 1585 bool AddIfNotFound) { 1586 bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg); 1587 bool hasAliases = isPhysReg && 1588 MCRegAliasIterator(Reg, RegInfo, false).isValid(); 1589 bool Found = false; 1590 SmallVector<unsigned,4> DeadOps; 1591 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { 1592 MachineOperand &MO = getOperand(i); 1593 if (!MO.isReg() || !MO.isDef()) 1594 continue; 1595 unsigned MOReg = MO.getReg(); 1596 if (!MOReg) 1597 continue; 1598 1599 if (MOReg == Reg) { 1600 MO.setIsDead(); 1601 Found = true; 1602 } else if (hasAliases && MO.isDead() && 1603 TargetRegisterInfo::isPhysicalRegister(MOReg)) { 1604 // There exists a super-register that's marked 
dead. 1605 if (RegInfo->isSuperRegister(Reg, MOReg)) 1606 return true; 1607 if (RegInfo->isSubRegister(Reg, MOReg)) 1608 DeadOps.push_back(i); 1609 } 1610 } 1611 1612 // Trim unneeded dead operands. 1613 while (!DeadOps.empty()) { 1614 unsigned OpIdx = DeadOps.back(); 1615 if (getOperand(OpIdx).isImplicit()) 1616 RemoveOperand(OpIdx); 1617 else 1618 getOperand(OpIdx).setIsDead(false); 1619 DeadOps.pop_back(); 1620 } 1621 1622 // If not found, this means an alias of one of the operands is dead. Add a 1623 // new implicit operand if required. 1624 if (Found || !AddIfNotFound) 1625 return Found; 1626 1627 addOperand(MachineOperand::CreateReg(Reg, 1628 true /*IsDef*/, 1629 true /*IsImp*/, 1630 false /*IsKill*/, 1631 true /*IsDead*/)); 1632 return true; 1633 } 1634 1635 void MachineInstr::clearRegisterDeads(unsigned Reg) { 1636 for (MachineOperand &MO : operands()) { 1637 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg) 1638 continue; 1639 MO.setIsDead(false); 1640 } 1641 } 1642 1643 void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) { 1644 for (MachineOperand &MO : operands()) { 1645 if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0) 1646 continue; 1647 MO.setIsUndef(IsUndef); 1648 } 1649 } 1650 1651 void MachineInstr::addRegisterDefined(unsigned Reg, 1652 const TargetRegisterInfo *RegInfo) { 1653 if (TargetRegisterInfo::isPhysicalRegister(Reg)) { 1654 MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo); 1655 if (MO) 1656 return; 1657 } else { 1658 for (const MachineOperand &MO : operands()) { 1659 if (MO.isReg() && MO.getReg() == Reg && MO.isDef() && 1660 MO.getSubReg() == 0) 1661 return; 1662 } 1663 } 1664 addOperand(MachineOperand::CreateReg(Reg, 1665 true /*IsDef*/, 1666 true /*IsImp*/)); 1667 } 1668 1669 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs, 1670 const TargetRegisterInfo &TRI) { 1671 bool HasRegMask = false; 1672 for (MachineOperand &MO : operands()) { 1673 if 
(MO.isRegMask()) { 1674 HasRegMask = true; 1675 continue; 1676 } 1677 if (!MO.isReg() || !MO.isDef()) continue; 1678 unsigned Reg = MO.getReg(); 1679 if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue; 1680 // If there are no uses, including partial uses, the def is dead. 1681 if (llvm::none_of(UsedRegs, 1682 [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); })) 1683 MO.setIsDead(); 1684 } 1685 1686 // This is a call with a register mask operand. 1687 // Mask clobbers are always dead, so add defs for the non-dead defines. 1688 if (HasRegMask) 1689 for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end(); 1690 I != E; ++I) 1691 addRegisterDefined(*I, &TRI); 1692 } 1693 1694 unsigned 1695 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) { 1696 // Build up a buffer of hash code components. 1697 SmallVector<size_t, 8> HashComponents; 1698 HashComponents.reserve(MI->getNumOperands() + 1); 1699 HashComponents.push_back(MI->getOpcode()); 1700 for (const MachineOperand &MO : MI->operands()) { 1701 if (MO.isReg() && MO.isDef() && 1702 TargetRegisterInfo::isVirtualRegister(MO.getReg())) 1703 continue; // Skip virtual register defs. 1704 1705 HashComponents.push_back(hash_value(MO)); 1706 } 1707 return hash_combine_range(HashComponents.begin(), HashComponents.end()); 1708 } 1709 1710 void MachineInstr::emitError(StringRef Msg) const { 1711 // Find the source location cookie. 
1712 unsigned LocCookie = 0; 1713 const MDNode *LocMD = nullptr; 1714 for (unsigned i = getNumOperands(); i != 0; --i) { 1715 if (getOperand(i-1).isMetadata() && 1716 (LocMD = getOperand(i-1).getMetadata()) && 1717 LocMD->getNumOperands() != 0) { 1718 if (const ConstantInt *CI = 1719 mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) { 1720 LocCookie = CI->getZExtValue(); 1721 break; 1722 } 1723 } 1724 } 1725 1726 if (const MachineBasicBlock *MBB = getParent()) 1727 if (const MachineFunction *MF = MBB->getParent()) 1728 return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg); 1729 report_fatal_error(Msg); 1730 } 1731 1732 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL, 1733 const MCInstrDesc &MCID, bool IsIndirect, 1734 unsigned Reg, const MDNode *Variable, 1735 const MDNode *Expr) { 1736 assert(isa<DILocalVariable>(Variable) && "not a variable"); 1737 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 1738 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) && 1739 "Expected inlined-at fields to agree"); 1740 if (IsIndirect) 1741 return BuildMI(MF, DL, MCID) 1742 .addReg(Reg, RegState::Debug) 1743 .addImm(0U) 1744 .addMetadata(Variable) 1745 .addMetadata(Expr); 1746 else 1747 return BuildMI(MF, DL, MCID) 1748 .addReg(Reg, RegState::Debug) 1749 .addReg(0U, RegState::Debug) 1750 .addMetadata(Variable) 1751 .addMetadata(Expr); 1752 } 1753 1754 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB, 1755 MachineBasicBlock::iterator I, 1756 const DebugLoc &DL, const MCInstrDesc &MCID, 1757 bool IsIndirect, unsigned Reg, 1758 const MDNode *Variable, const MDNode *Expr) { 1759 assert(isa<DILocalVariable>(Variable) && "not a variable"); 1760 assert(cast<DIExpression>(Expr)->isValid() && "not an expression"); 1761 MachineFunction &MF = *BB.getParent(); 1762 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr); 1763 BB.insert(I, MI); 1764 return 
MachineInstrBuilder(MF, MI); 1765 } 1766 1767 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot. 1768 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE. 1769 static const DIExpression *computeExprForSpill(const MachineInstr &MI) { 1770 assert(MI.getOperand(0).isReg() && "can't spill non-register"); 1771 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) && 1772 "Expected inlined-at fields to agree"); 1773 1774 const DIExpression *Expr = MI.getDebugExpression(); 1775 if (MI.isIndirectDebugValue()) { 1776 assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset"); 1777 Expr = DIExpression::prepend(Expr, DIExpression::WithDeref); 1778 } 1779 return Expr; 1780 } 1781 1782 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB, 1783 MachineBasicBlock::iterator I, 1784 const MachineInstr &Orig, 1785 int FrameIndex) { 1786 const DIExpression *Expr = computeExprForSpill(Orig); 1787 return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc()) 1788 .addFrameIndex(FrameIndex) 1789 .addImm(0U) 1790 .addMetadata(Orig.getDebugVariable()) 1791 .addMetadata(Expr); 1792 } 1793 1794 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) { 1795 const DIExpression *Expr = computeExprForSpill(Orig); 1796 Orig.getOperand(0).ChangeToFrameIndex(FrameIndex); 1797 Orig.getOperand(1).ChangeToImmediate(0U); 1798 Orig.getOperand(3).setMetadata(Expr); 1799 } 1800