1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Methods common to all machine instructions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/MachineInstr.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/FoldingSet.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallString.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/Loads.h"
25 #include "llvm/Analysis/MemoryLocation.h"
26 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
27 #include "llvm/CodeGen/MachineBasicBlock.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineInstrBuilder.h"
30 #include "llvm/CodeGen/MachineInstrBundle.h"
31 #include "llvm/CodeGen/MachineMemOperand.h"
32 #include "llvm/CodeGen/MachineModuleInfo.h"
33 #include "llvm/CodeGen/MachineOperand.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/PseudoSourceValue.h"
36 #include "llvm/CodeGen/TargetInstrInfo.h"
37 #include "llvm/CodeGen/TargetRegisterInfo.h"
38 #include "llvm/CodeGen/TargetSubtargetInfo.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DebugInfoMetadata.h"
41 #include "llvm/IR/DebugLoc.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/InlineAsm.h"
45 #include "llvm/IR/InstrTypes.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/LLVMContext.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Module.h"
50 #include "llvm/IR/ModuleSlotTracker.h"
51 #include "llvm/IR/Type.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/MC/MCInstrDesc.h"
54 #include "llvm/MC/MCRegisterInfo.h"
55 #include "llvm/MC/MCSymbol.h"
56 #include "llvm/Support/Casting.h"
57 #include "llvm/Support/CommandLine.h"
58 #include "llvm/Support/Compiler.h"
59 #include "llvm/Support/Debug.h"
60 #include "llvm/Support/ErrorHandling.h"
61 #include "llvm/Support/LowLevelTypeImpl.h"
62 #include "llvm/Support/MathExtras.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "llvm/Target/TargetIntrinsicInfo.h"
65 #include "llvm/Target/TargetMachine.h"
66 #include <algorithm>
67 #include <cassert>
68 #include <cstddef>
69 #include <cstdint>
70 #include <cstring>
71 #include <iterator>
72 #include <utility>
73 
74 using namespace llvm;
75 
76 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
77   if (MCID->ImplicitDefs)
78     for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
79            ++ImpDefs)
80       addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
81   if (MCID->ImplicitUses)
82     for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
83            ++ImpUses)
84       addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
85 }
86 
87 /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
88 /// implicit operands. It reserves space for the number of operands specified by
89 /// the MCInstrDesc.
90 MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
91                            DebugLoc dl, bool NoImp)
92     : MCID(&tid), debugLoc(std::move(dl)) {
93   assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
94 
95   // Reserve space for the expected number of operands.
96   if (unsigned NumOps = MCID->getNumOperands() +
97     MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
98     CapOperands = OperandCapacity::get(NumOps);
99     Operands = MF.allocateOperandArray(CapOperands);
100   }
101 
102   if (!NoImp)
103     addImplicitDefUseOperands(MF);
104 }
105 
/// MachineInstr ctor - Copies MachineInstr arg exactly.
///
/// Memrefs and flags are copied by value; operands are re-added one by one
/// via addOperand, which also registers register operands with MRI when the
/// instruction is inside a function.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
      debugLoc(MI.getDebugLoc()) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve exactly enough capacity for the source instruction's operands.
  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}
123 
124 /// getRegInfo - If this instruction is embedded into a MachineFunction,
125 /// return the MachineRegisterInfo object for the current function, otherwise
126 /// return null.
127 MachineRegisterInfo *MachineInstr::getRegInfo() {
128   if (MachineBasicBlock *MBB = getParent())
129     return &MBB->getParent()->getRegInfo();
130   return nullptr;
131 }
132 
133 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
134 /// this instruction from their respective use lists.  This requires that the
135 /// operands already be on their use lists.
136 void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
137   for (MachineOperand &MO : operands())
138     if (MO.isReg())
139       MRI.removeRegOperandFromUseList(&MO);
140 }
141 
142 /// AddRegOperandsToUseLists - Add all of the register operands in
143 /// this instruction from their respective use lists.  This requires that the
144 /// operands not be on their use lists yet.
145 void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
146   for (MachineOperand &MO : operands())
147     if (MO.isReg())
148       MRI.addRegOperandToUseList(&MO);
149 }
150 
151 void MachineInstr::addOperand(const MachineOperand &Op) {
152   MachineBasicBlock *MBB = getParent();
153   assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
154   MachineFunction *MF = MBB->getParent();
155   assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
156   addOperand(*MF, Op);
157 }
158 
159 /// Move NumOps MachineOperands from Src to Dst, with support for overlapping
160 /// ranges. If MRI is non-null also update use-def chains.
161 static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
162                          unsigned NumOps, MachineRegisterInfo *MRI) {
163   if (MRI)
164     return MRI->moveOperands(Dst, Src, NumOps);
165 
166   // MachineOperand is a trivially copyable type so we can just use memmove.
167   std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
168 }
169 
/// addOperand - Add the specified operand to the instruction.  If it is an
/// implicit operand, it is added to the end of the operand list.  If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
///
/// The operand array may be reallocated; any saved MachineOperand pointers
/// into this instruction are invalidated.  Register operands are registered
/// with MRI (and tied/early-clobber constraints applied) as appropriate.
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand.  Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around.  See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk back over the trailing implicit register operands so the explicit
    // operand is inserted before them.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

#ifndef NDEBUG
  bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
  // OpNo now points as the desired insertion point.  Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
          OpNo < MCID->getNumOperands() || isMetaDataOp) &&
         "Trying to add an operand to a machine instr that is already done!");
#endif

  // Null when the instruction is not yet inserted into a function.
  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  // Note: when reallocating, these still live in OldOperands.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
  }
}
265 
/// RemoveOperand - Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
///
/// The removed operand is first untied; operands after OpNo are shifted down
/// (updating use lists through moveOperands when MRI is available).
void MachineInstr::RemoveOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  // Take the operand being removed off its use list before it is overwritten.
  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Shift the trailing operands down over the removed slot.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
292 
293 /// addMemOperand - Add a MachineMemOperand to the machine instruction.
294 /// This function should be used only occasionally. The setMemRefs function
295 /// is the primary method for setting up a MachineInstr's MemRefs list.
296 void MachineInstr::addMemOperand(MachineFunction &MF,
297                                  MachineMemOperand *MO) {
298   mmo_iterator OldMemRefs = MemRefs;
299   unsigned OldNumMemRefs = NumMemRefs;
300 
301   unsigned NewNum = NumMemRefs + 1;
302   mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
303 
304   std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
305   NewMemRefs[NewNum - 1] = MO;
306   setMemRefs(NewMemRefs, NewMemRefs + NewNum);
307 }
308 
309 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
310 /// identical.
311 static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) {
312   auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end();
313   auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end();
314   if ((E1 - I1) != (E2 - I2))
315     return false;
316   for (; I1 != E1; ++I1, ++I2) {
317     if (**I1 != **I2)
318       return false;
319   }
320   return true;
321 }
322 
/// Combine this instruction's memory references with \p Other's, returning
/// the merged array and its length. A result of {nullptr, 0} means the
/// memory information had to be dropped (treated as "may access anything").
std::pair<MachineInstr::mmo_iterator, unsigned>
MachineInstr::mergeMemRefsWith(const MachineInstr& Other) {

  // If either of the incoming memrefs are empty, we must be conservative and
  // treat this as if we've exhausted our space for memrefs and dropped them.
  if (memoperands_empty() || Other.memoperands_empty())
    return std::make_pair(nullptr, 0);

  // If both instructions have identical memrefs, we don't need to merge them.
  // Since many instructions have a single memref, and we tend to merge things
  // like pairs of loads from the same location, this catches a large number of
  // cases in practice.
  if (hasIdenticalMMOs(*this, Other))
    return std::make_pair(MemRefs, NumMemRefs);

  // TODO: consider uniquing elements within the operand lists to reduce
  // space usage and fall back to conservative information less often.
  size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs;

  // If we don't have enough room to store this many memrefs, be conservative
  // and drop them.  Otherwise, we'd fail asserts when trying to add them to
  // the new instruction.
  // The count is stored in 8 bits, so a total that doesn't round-trip
  // through uint8_t would overflow.
  if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs))
    return std::make_pair(nullptr, 0);

  // Concatenate both lists into a freshly allocated array.
  MachineFunction *MF = getMF();
  mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs);
  mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(),
                                  MemBegin);
  MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(),
                     MemEnd);
  assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs &&
         "missing memrefs");

  return std::make_pair(MemBegin, CombinedNumMemRefs);
}
359 
360 bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
361   assert(!isBundledWithPred() && "Must be called on bundle header");
362   for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
363     if (MII->getDesc().getFlags() & Mask) {
364       if (Type == AnyInBundle)
365         return true;
366     } else {
367       if (Type == AllInBundle && !MII->isBundle())
368         return false;
369     }
370     // This was the last instruction in the bundle.
371     if (!MII->isBundledWithSucc())
372       return Type == AllInBundle;
373   }
374 }
375 
/// Return true if this instruction is identical to \p Other under the
/// comparison mode \p Check (which controls how register defs, kill and dead
/// flags are compared). Bundles are compared member-wise.
bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we analyzed the last instruction inside at least one of the
    // bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    // Non-register operands are always compared exactly.
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        // Only skip the comparison when both defs are virtual registers.
        if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
            !TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two dbg.values are not identical.
  if (isDebugValue())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  return true;
}
446 
447 const MachineFunction *MachineInstr::getMF() const {
448   return getParent()->getParent();
449 }
450 
451 MachineInstr *MachineInstr::removeFromParent() {
452   assert(getParent() && "Not embedded in a basic block!");
453   return getParent()->remove(this);
454 }
455 
456 MachineInstr *MachineInstr::removeFromBundle() {
457   assert(getParent() && "Not embedded in a basic block!");
458   return getParent()->remove_instr(this);
459 }
460 
461 void MachineInstr::eraseFromParent() {
462   assert(getParent() && "Not embedded in a basic block!");
463   getParent()->erase(this);
464 }
465 
466 void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
467   assert(getParent() && "Not embedded in a basic block!");
468   MachineBasicBlock *MBB = getParent();
469   MachineFunction *MF = MBB->getParent();
470   assert(MF && "Not embedded in a function!");
471 
472   MachineInstr *MI = (MachineInstr *)this;
473   MachineRegisterInfo &MRI = MF->getRegInfo();
474 
475   for (const MachineOperand &MO : MI->operands()) {
476     if (!MO.isReg() || !MO.isDef())
477       continue;
478     unsigned Reg = MO.getReg();
479     if (!TargetRegisterInfo::isVirtualRegister(Reg))
480       continue;
481     MRI.markUsesInDebugValueAsUndef(Reg);
482   }
483   MI->eraseFromParent();
484 }
485 
486 void MachineInstr::eraseFromBundle() {
487   assert(getParent() && "Not embedded in a basic block!");
488   getParent()->erase_instr(this);
489 }
490 
491 /// getNumExplicitOperands - Returns the number of non-implicit operands.
492 ///
493 unsigned MachineInstr::getNumExplicitOperands() const {
494   unsigned NumOperands = MCID->getNumOperands();
495   if (!MCID->isVariadic())
496     return NumOperands;
497 
498   for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) {
499     const MachineOperand &MO = getOperand(i);
500     if (!MO.isReg() || !MO.isImplicit())
501       NumOperands++;
502   }
503   return NumOperands;
504 }
505 
506 void MachineInstr::bundleWithPred() {
507   assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
508   setFlag(BundledPred);
509   MachineBasicBlock::instr_iterator Pred = getIterator();
510   --Pred;
511   assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
512   Pred->setFlag(BundledSucc);
513 }
514 
515 void MachineInstr::bundleWithSucc() {
516   assert(!isBundledWithSucc() && "MI is already bundled with its successor");
517   setFlag(BundledSucc);
518   MachineBasicBlock::instr_iterator Succ = getIterator();
519   ++Succ;
520   assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
521   Succ->setFlag(BundledPred);
522 }
523 
524 void MachineInstr::unbundleFromPred() {
525   assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
526   clearFlag(BundledPred);
527   MachineBasicBlock::instr_iterator Pred = getIterator();
528   --Pred;
529   assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
530   Pred->clearFlag(BundledSucc);
531 }
532 
533 void MachineInstr::unbundleFromSucc() {
534   assert(isBundledWithSucc() && "MI isn't bundled with its successor");
535   clearFlag(BundledSucc);
536   MachineBasicBlock::instr_iterator Succ = getIterator();
537   ++Succ;
538   assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
539   Succ->clearFlag(BundledPred);
540 }
541 
542 bool MachineInstr::isStackAligningInlineAsm() const {
543   if (isInlineAsm()) {
544     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
545     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
546       return true;
547   }
548   return false;
549 }
550 
551 InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
552   assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
553   unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
554   return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
555 }
556 
/// Find the operand index of the flag word that governs inline asm operand
/// \p OpIdx. Walks the operand groups (each headed by an immediate flag
/// operand encoding the group's register count) until the group containing
/// OpIdx is found. Returns -1 for the initial fixed operands or for the
/// trailing implicit register operands; optionally reports the zero-based
/// group number through \p GroupNo.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    // Group size = the flag word itself plus its registers.
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
584 
585 const DILocalVariable *MachineInstr::getDebugVariable() const {
586   assert(isDebugValue() && "not a DBG_VALUE");
587   return cast<DILocalVariable>(getOperand(2).getMetadata());
588 }
589 
590 const DIExpression *MachineInstr::getDebugExpression() const {
591   assert(isDebugValue() && "not a DBG_VALUE");
592   return cast<DIExpression>(getOperand(3).getMetadata());
593 }
594 
/// Return the register class constraint on operand OpIdx, or null if there is
/// none. For ordinary instructions this comes from the MCInstrDesc via TII;
/// for inline asm it is decoded from the operand-group flag word.
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  // Only register-kind operand groups can carry a register class id.
  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}
634 
/// Accumulate the register class constraints that this instruction (or, with
/// ExploreBundle, its whole bundle) imposes on virtual register \p Reg,
/// starting from \p CurRC. Returns null once the constraints become
/// unsatisfiable.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have
  // been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}
651 
652 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
653     unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
654     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
655   assert(CurRC && "Invalid initial register class");
656   // Check if Reg is constrained by some of its use/def from MI.
657   const MachineOperand &MO = getOperand(OpIdx);
658   if (!MO.isReg() || MO.getReg() != Reg)
659     return CurRC;
660   // If yes, accumulate the constraints through the operand.
661   return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
662 }
663 
/// Narrow \p CurRC by the constraint of register operand OpIdx, taking any
/// subregister index on the operand into account. Returns null when no class
/// satisfies both constraints.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    // The operand reads/writes only a subregister: the full register's class
    // must support SubIdx, and the subregister must satisfy OpRC if present.
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}
681 
682 /// Return the number of instructions inside the MI bundle, not counting the
683 /// header instruction.
684 unsigned MachineInstr::getBundleSize() const {
685   MachineBasicBlock::const_instr_iterator I = getIterator();
686   unsigned Size = 0;
687   while (I->isBundledWithSucc()) {
688     ++Size;
689     ++I;
690   }
691   return Size;
692 }
693 
694 /// Returns true if the MachineInstr has an implicit-use operand of exactly
695 /// the given register (not considering sub/super-registers).
696 bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
697   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
698     const MachineOperand &MO = getOperand(i);
699     if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
700       return true;
701   }
702   return false;
703 }
704 
705 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
706 /// the specific register or -1 if it is not found. It further tightens
707 /// the search criteria to a use that kills the register if isKill is true.
708 int MachineInstr::findRegisterUseOperandIdx(
709     unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
710   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
711     const MachineOperand &MO = getOperand(i);
712     if (!MO.isReg() || !MO.isUse())
713       continue;
714     unsigned MOReg = MO.getReg();
715     if (!MOReg)
716       continue;
717     if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
718                          TargetRegisterInfo::isPhysicalRegister(Reg) &&
719                          TRI->isSubRegister(MOReg, Reg)))
720       if (!isKill || MO.isKill())
721         return i;
722   }
723   return -1;
724 }
725 
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines. If Ops is non-null, the index of every operand that
/// references Reg is appended to it.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(unsigned Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      // An <undef> use does not read the register.
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial <def,undef> doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
753 
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
/// With Overlap set, any overlapping physical def (including a clobbering
/// regmask operand) is accepted rather than only sub-register containment.
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    // For physical registers, optionally widen the match via TRI.
    if (!Found && TRI && isPhys &&
        TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
784 
785 /// findFirstPredOperandIdx() - Find the index of the first operand in the
786 /// operand list that is used to represent the predicate. It returns -1 if
787 /// none is found.
788 int MachineInstr::findFirstPredOperandIdx() const {
789   // Don't call MCID.findFirstPredOperandIdx() because this variant
790   // is sometimes called on an instruction that's not yet complete, and
791   // so the number of operands is less than the MCID indicates. In
792   // particular, the PTX target does this.
793   const MCInstrDesc &MCID = getDesc();
794   if (MCID.isPredicable()) {
795     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
796       if (MCID.OpInfo[i].isPredicate())
797         return i;
798   }
799 
800   return -1;
801 }
802 
// MachineOperand::TiedTo is 4 bits wide.
// TiedMax is the largest value that fits in the field. Operand indices at or
// beyond TiedMax - 1 are encoded as TiedMax and recovered by a search in
// findTiedOperandIdx() (see tieOperands() below for the encoding).
const unsigned TiedMax = 15;
805 
/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  // TiedTo stores the index biased by 1 so that 0 can mean "not tied".
  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands, but on
    // normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert(isInlineAsm() && "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
839 
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  // TiedTo is the partner index biased by 1 (0 means "untied").
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    // The use's TiedTo saturated at TiedMax, so scan the tail of the operand
    // list for a use whose TiedTo encodes OpIdx.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    // Each group is the flag word followed by its register operands.
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
897 
898 /// clearKillInfo - Clears kill flags on all operands.
899 ///
900 void MachineInstr::clearKillInfo() {
901   for (MachineOperand &MO : operands()) {
902     if (MO.isReg() && MO.isUse())
903       MO.setIsKill(false);
904   }
905 }
906 
907 void MachineInstr::substituteRegister(unsigned FromReg,
908                                       unsigned ToReg,
909                                       unsigned SubIdx,
910                                       const TargetRegisterInfo &RegInfo) {
911   if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
912     if (SubIdx)
913       ToReg = RegInfo.getSubReg(ToReg, SubIdx);
914     for (MachineOperand &MO : operands()) {
915       if (!MO.isReg() || MO.getReg() != FromReg)
916         continue;
917       MO.substPhysReg(ToReg, RegInfo);
918     }
919   } else {
920     for (MachineOperand &MO : operands()) {
921       if (!MO.isReg() || MO.getReg() != FromReg)
922         continue;
923       MO.substVirtReg(ToReg, SubIdx, RegInfo);
924     }
925   }
926 }
927 
928 /// isSafeToMove - Return true if it is safe to move this instruction. If
929 /// SawStore is set to true, it means that there is a store (or call) between
930 /// the instruction's location and its intended destination.
931 bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
932   // Ignore stuff that we obviously can't move.
933   //
934   // Treat volatile loads as stores. This is not strictly necessary for
935   // volatiles, but it is required for atomic loads. It is not allowed to move
936   // a load across an atomic load with Ordering > Monotonic.
937   if (mayStore() || isCall() || isPHI() ||
938       (mayLoad() && hasOrderedMemoryRef())) {
939     SawStore = true;
940     return false;
941   }
942 
943   if (isPosition() || isDebugValue() || isTerminator() ||
944       hasUnmodeledSideEffects())
945     return false;
946 
947   // See if this instruction does a load.  If so, we have to guarantee that the
948   // loaded value doesn't change between the load and the its intended
949   // destination. The check for isInvariantLoad gives the targe the chance to
950   // classify the load as always returning a constant, e.g. a constant pool
951   // load.
952   if (mayLoad() && !isDereferenceableInvariantLoad(AA))
953     // Otherwise, this is a real load.  If there is a store between the load and
954     // end of block, we can't move it.
955     return !SawStore;
956 
957   return true;
958 }
959 
/// Return true if this instruction's memory access may overlap with Other's.
/// Conservatively returns true whenever disjointness cannot be proven.
bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
                            bool UseTBAA) {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
    return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
    return true;

  MachineMemOperand *MMOa = *memoperands_begin();
  MachineMemOperand *MMOb = *Other.memoperands_begin();

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();

  int64_t MinOffset = std::min(OffsetA, OffsetB);
  int64_t WidthA = MMOa->getSize();
  int64_t WidthB = MMOb->getSize();
  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // A PseudoSourceValue that can't alias the frame can't alias an IR value
    // either, so a PSV on one side and an IR value on the other proves
    // disjointness.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    // Same underlying object: a pure interval-overlap check on
    // [Offset, Offset+Width) suffices.
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // Extend each access's size so both queries cover the full span from the
  // smaller offset, then ask alias analysis about the two locations.
  int64_t Overlapa = WidthA + OffsetA - MinOffset;
  int64_t Overlapb = WidthB + OffsetB - MinOffset;

  AliasResult AAResult = AA->alias(
      MemoryLocation(ValA, Overlapa,
                     UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, Overlapb,
                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != NoAlias);
}
1044 
1045 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1046 /// or volatile memory reference, or if the information describing the memory
1047 /// reference is not available. Return false if it is known to have no ordered
1048 /// memory references.
1049 bool MachineInstr::hasOrderedMemoryRef() const {
1050   // An instruction known never to access memory won't have a volatile access.
1051   if (!mayStore() &&
1052       !mayLoad() &&
1053       !isCall() &&
1054       !hasUnmodeledSideEffects())
1055     return false;
1056 
1057   // Otherwise, if the instruction has no memory reference information,
1058   // conservatively assume it wasn't preserved.
1059   if (memoperands_empty())
1060     return true;
1061 
1062   // Check if any of our memory operands are ordered.
1063   return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1064     return !MMO->isUnordered();
1065   });
1066 }
1067 
1068 /// isDereferenceableInvariantLoad - Return true if this instruction will never
1069 /// trap and is loading from a location whose value is invariant across a run of
1070 /// this function.
1071 bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
1072   // If the instruction doesn't load at all, it isn't an invariant load.
1073   if (!mayLoad())
1074     return false;
1075 
1076   // If the instruction has lost its memoperands, conservatively assume that
1077   // it may not be an invariant load.
1078   if (memoperands_empty())
1079     return false;
1080 
1081   const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1082 
1083   for (MachineMemOperand *MMO : memoperands()) {
1084     if (MMO->isVolatile()) return false;
1085     if (MMO->isStore()) return false;
1086     if (MMO->isInvariant() && MMO->isDereferenceable())
1087       continue;
1088 
1089     // A load from a constant PseudoSourceValue is invariant.
1090     if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1091       if (PSV->isConstant(&MFI))
1092         continue;
1093 
1094     if (const Value *V = MMO->getValue()) {
1095       // If we have an AliasAnalysis, ask it whether the memory is constant.
1096       if (AA &&
1097           AA->pointsToConstantMemory(
1098               MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
1099         continue;
1100     }
1101 
1102     // Otherwise assume conservatively.
1103     return false;
1104   }
1105 
1106   // Everything checks out.
1107   return true;
1108 }
1109 
1110 /// isConstantValuePHI - If the specified instruction is a PHI that always
1111 /// merges together the same virtual register, return the register, otherwise
1112 /// return 0.
1113 unsigned MachineInstr::isConstantValuePHI() const {
1114   if (!isPHI())
1115     return 0;
1116   assert(getNumOperands() >= 3 &&
1117          "It's illegal to have a PHI without source operands");
1118 
1119   unsigned Reg = getOperand(1).getReg();
1120   for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1121     if (getOperand(i).getReg() != Reg)
1122       return 0;
1123   return Reg;
1124 }
1125 
1126 bool MachineInstr::hasUnmodeledSideEffects() const {
1127   if (hasProperty(MCID::UnmodeledSideEffects))
1128     return true;
1129   if (isInlineAsm()) {
1130     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1131     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1132       return true;
1133   }
1134 
1135   return false;
1136 }
1137 
1138 bool MachineInstr::isLoadFoldBarrier() const {
1139   return mayStore() || isCall() || hasUnmodeledSideEffects();
1140 }
1141 
1142 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1143 ///
1144 bool MachineInstr::allDefsAreDead() const {
1145   for (const MachineOperand &MO : operands()) {
1146     if (!MO.isReg() || MO.isUse())
1147       continue;
1148     if (!MO.isDead())
1149       return false;
1150   }
1151   return true;
1152 }
1153 
1154 /// copyImplicitOps - Copy implicit register operands from specified
1155 /// instruction to this instruction.
1156 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1157                                    const MachineInstr &MI) {
1158   for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1159        i != e; ++i) {
1160     const MachineOperand &MO = MI.getOperand(i);
1161     if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1162       addOperand(MF, MO);
1163   }
1164 }
1165 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print this instruction to the debug stream, indented two spaces.
/// Only available in builds with dump support.
LLVM_DUMP_METHOD void MachineInstr::dump() const {
  dbgs() << "  ";
  print(dbgs());
}
#endif
1172 
1173 void MachineInstr::print(raw_ostream &OS, bool SkipOpers, bool SkipDebugLoc,
1174                          const TargetInstrInfo *TII) const {
1175   const Module *M = nullptr;
1176   if (const MachineBasicBlock *MBB = getParent())
1177     if (const MachineFunction *MF = MBB->getParent())
1178       M = MF->getFunction()->getParent();
1179 
1180   ModuleSlotTracker MST(M);
1181   print(OS, MST, SkipOpers, SkipDebugLoc, TII);
1182 }
1183 
/// Print this machine instruction in a human-readable form:
/// explicit defs, "=", the opcode, the remaining operands, and then
/// ';'-prefixed annotations (flags, memory operands, register classes,
/// and debug location info).
void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
                         bool SkipOpers, bool SkipDebugLoc,
                         const TargetInstrInfo *TII) const {
  // We can be a bit tidier if we know the MachineFunction.
  const MachineFunction *MF = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const TargetIntrinsicInfo *IntrinsicInfo = nullptr;

  if (const MachineBasicBlock *MBB = getParent()) {
    MF = MBB->getParent();
    if (MF) {
      MRI = &MF->getRegInfo();
      TRI = MF->getSubtarget().getRegisterInfo();
      if (!TII)
        TII = MF->getSubtarget().getInstrInfo();
      IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
    }
  }

  // Save a list of virtual registers.
  // Collected while printing so their register classes can be listed at the
  // end of the line.
  SmallVector<unsigned, 8> VirtRegs;

  // Print explicitly defined operands on the left of an assignment syntax.
  unsigned StartOp = 0, e = getNumOperands();
  for (; StartOp < e && getOperand(StartOp).isReg() &&
         getOperand(StartOp).isDef() &&
         !getOperand(StartOp).isImplicit();
       ++StartOp) {
    if (StartOp != 0) OS << ", ";
    getOperand(StartOp).print(OS, MST, TRI, IntrinsicInfo);
    unsigned Reg = getOperand(StartOp).getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      VirtRegs.push_back(Reg);
      // Generic virtual registers carry an LLT; print it after the reg.
      LLT Ty = MRI ? MRI->getType(Reg) : LLT{};
      if (Ty.isValid())
        OS << '(' << Ty << ')';
    }
  }

  if (StartOp != 0)
    OS << " = ";

  // Print the opcode name.
  if (TII)
    OS << TII->getName(getOpcode());
  else
    OS << "UNKNOWN";

  if (SkipOpers)
    return;

  // Print the rest of the operands.
  bool FirstOp = true;
  unsigned AsmDescOp = ~0u;
  unsigned AsmOpCount = 0;

  // INLINEASM instructions start with the asm string and an extra-info
  // immediate; print those specially, then fall through to the operand loop
  // with AsmDescOp tracking the next flag-word operand.
  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
    // Print asm string.
    OS << " ";
    getOperand(InlineAsm::MIOp_AsmString).print(OS, MST, TRI);

    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      OS << " [sideeffect]";
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      OS << " [mayload]";
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      OS << " [maystore]";
    if (ExtraInfo & InlineAsm::Extra_IsConvergent)
      OS << " [isconvergent]";
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      OS << " [alignstack]";
    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
      OS << " [attdialect]";
    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
      OS << " [inteldialect]";

    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
    FirstOp = false;
  }

  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);

    if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      VirtRegs.push_back(MO.getReg());

    if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";
    // Prefix predicate and optional-def operands per the MCInstrDesc.
    if (i < getDesc().NumOperands) {
      const MCOperandInfo &MCOI = getDesc().OpInfo[i];
      if (MCOI.isPredicate())
        OS << "pred:";
      if (MCOI.isOptionalDef())
        OS << "opt:";
    }
    if (isDebugValue() && MO.isMetadata()) {
      // Pretty print DBG_VALUE instructions.
      auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
      if (DIV && !DIV->getName().empty())
        OS << "!\"" << DIV->getName() << '\"';
      else
        MO.print(OS, MST, TRI);
    } else if (TRI && (isInsertSubreg() || isRegSequence() ||
                       (isSubregToReg() && i == 3)) && MO.isImm()) {
      // These pseudos carry a subregister index as an immediate.
      OS << TRI->getSubRegIndexName(MO.getImm());
    } else if (i == AsmDescOp && MO.isImm()) {
      // Pretty print the inline asm operand descriptor.
      OS << '$' << AsmOpCount++;
      unsigned Flag = MO.getImm();
      switch (InlineAsm::getKind(Flag)) {
      case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
      case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
      case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
      case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
      case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
      case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
      default: OS << ":[??" << InlineAsm::getKind(Flag); break;
      }

      unsigned RCID = 0;
      if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
          InlineAsm::hasRegClassConstraint(Flag, RCID)) {
        if (TRI) {
          OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
        } else
          OS << ":RC" << RCID;
      }

      if (InlineAsm::isMemKind(Flag)) {
        unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
        switch (MCID) {
        case InlineAsm::Constraint_es: OS << ":es"; break;
        case InlineAsm::Constraint_i:  OS << ":i"; break;
        case InlineAsm::Constraint_m:  OS << ":m"; break;
        case InlineAsm::Constraint_o:  OS << ":o"; break;
        case InlineAsm::Constraint_v:  OS << ":v"; break;
        case InlineAsm::Constraint_Q:  OS << ":Q"; break;
        case InlineAsm::Constraint_R:  OS << ":R"; break;
        case InlineAsm::Constraint_S:  OS << ":S"; break;
        case InlineAsm::Constraint_T:  OS << ":T"; break;
        case InlineAsm::Constraint_Um: OS << ":Um"; break;
        case InlineAsm::Constraint_Un: OS << ":Un"; break;
        case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
        case InlineAsm::Constraint_Us: OS << ":Us"; break;
        case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
        case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
        case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
        case InlineAsm::Constraint_X:  OS << ":X"; break;
        case InlineAsm::Constraint_Z:  OS << ":Z"; break;
        case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
        case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
        default: OS << ":?"; break;
        }
      }

      unsigned TiedTo = 0;
      if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
        OS << " tiedto:$" << TiedTo;

      OS << ']';

      // Compute the index of the next operand descriptor.
      AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
    } else
      MO.print(OS, MST, TRI);
  }

  // Trailing annotations share a single leading ';'; HaveSemi tracks whether
  // it has been emitted yet.
  bool HaveSemi = false;
  const unsigned PrintableFlags = FrameSetup | FrameDestroy;
  if (Flags & PrintableFlags) {
    if (!HaveSemi) {
      OS << ";";
      HaveSemi = true;
    }
    OS << " flags: ";

    if (Flags & FrameSetup)
      OS << "FrameSetup";

    if (Flags & FrameDestroy)
      OS << "FrameDestroy";
  }

  if (!memoperands_empty()) {
    if (!HaveSemi) {
      OS << ";";
      HaveSemi = true;
    }

    OS << " mem:";
    for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
         i != e; ++i) {
      (*i)->print(OS, MST);
      if (std::next(i) != e)
        OS << " ";
    }
  }

  // Print the regclass of any virtual registers encountered.
  if (MRI && !VirtRegs.empty()) {
    if (!HaveSemi) {
      OS << ";";
      HaveSemi = true;
    }
    for (unsigned i = 0; i != VirtRegs.size(); ++i) {
      const RegClassOrRegBank &RC = MRI->getRegClassOrRegBank(VirtRegs[i]);
      if (!RC)
        continue;
      // Generic virtual registers do not have register classes.
      if (RC.is<const RegisterBank *>())
        OS << " " << RC.get<const RegisterBank *>()->getName();
      else
        OS << " "
           << TRI->getRegClassName(RC.get<const TargetRegisterClass *>());
      OS << ':' << printReg(VirtRegs[i]);
      // Group all later virtregs with the same class onto this line, erasing
      // them so they are not printed again.
      for (unsigned j = i+1; j != VirtRegs.size();) {
        if (MRI->getRegClassOrRegBank(VirtRegs[j]) != RC) {
          ++j;
          continue;
        }
        if (VirtRegs[i] != VirtRegs[j])
          OS << "," << printReg(VirtRegs[j]);
        VirtRegs.erase(VirtRegs.begin()+j);
      }
    }
  }

  // Print debug location information.
  if (isDebugValue() && getOperand(e - 2).isMetadata()) {
    if (!HaveSemi)
      OS << ";";
    auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
    OS << " line no:" <<  DV->getLine();
    if (auto *InlinedAt = debugLoc->getInlinedAt()) {
      DebugLoc InlinedAtDL(InlinedAt);
      if (InlinedAtDL && MF) {
        OS << " inlined @[ ";
        InlinedAtDL.print(OS);
        OS << " ]";
      }
    }
    if (isIndirectDebugValue())
      OS << " indirect";
  } else if (SkipDebugLoc) {
    return;
  } else if (debugLoc && MF) {
    if (!HaveSemi)
      OS << ";";
    OS << " dbg:";
    debugLoc.print(OS);
  }

  OS << '\n';
}
1441 
/// Mark IncomingReg as killed by this instruction, adding an implicit kill
/// operand if AddIfNotFound is true and no suitable use exists. Returns true
/// if a kill of IncomingReg (or a super-register) is now recorded.
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Kill flags on sub-register uses become redundant once IncomingReg itself
  // is marked killed; collect them here and trim afterwards.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.
  // Implicit sub-register uses are removed entirely; explicit ones just lose
  // their kill flag. Iterate back-to-front so indices stay valid.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
1507 
1508 void MachineInstr::clearRegisterKills(unsigned Reg,
1509                                       const TargetRegisterInfo *RegInfo) {
1510   if (!TargetRegisterInfo::isPhysicalRegister(Reg))
1511     RegInfo = nullptr;
1512   for (MachineOperand &MO : operands()) {
1513     if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1514       continue;
1515     unsigned OpReg = MO.getReg();
1516     if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1517       MO.setIsKill(false);
1518   }
1519 }
1520 
/// Mark Reg as dead in this instruction's defs, adding an implicit dead def
/// if AddIfNotFound is true and no def of Reg exists. Returns true if a dead
/// def of Reg (or a super-register) is now recorded.
bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  // Dead flags on sub-register defs become redundant once Reg itself is
  // marked dead; collect them here and trim afterwards.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.
  // Implicit sub-register defs are removed entirely; explicit ones just lose
  // their dead flag. Iterate back-to-front so indices stay valid.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
1572 
1573 void MachineInstr::clearRegisterDeads(unsigned Reg) {
1574   for (MachineOperand &MO : operands()) {
1575     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1576       continue;
1577     MO.setIsDead(false);
1578   }
1579 }
1580 
1581 void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
1582   for (MachineOperand &MO : operands()) {
1583     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1584       continue;
1585     MO.setIsUndef(IsUndef);
1586   }
1587 }
1588 
1589 void MachineInstr::addRegisterDefined(unsigned Reg,
1590                                       const TargetRegisterInfo *RegInfo) {
1591   if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1592     MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
1593     if (MO)
1594       return;
1595   } else {
1596     for (const MachineOperand &MO : operands()) {
1597       if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1598           MO.getSubReg() == 0)
1599         return;
1600     }
1601   }
1602   addOperand(MachineOperand::CreateReg(Reg,
1603                                        true  /*IsDef*/,
1604                                        true  /*IsImp*/));
1605 }
1606 
1607 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1608                                          const TargetRegisterInfo &TRI) {
1609   bool HasRegMask = false;
1610   for (MachineOperand &MO : operands()) {
1611     if (MO.isRegMask()) {
1612       HasRegMask = true;
1613       continue;
1614     }
1615     if (!MO.isReg() || !MO.isDef()) continue;
1616     unsigned Reg = MO.getReg();
1617     if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
1618     // If there are no uses, including partial uses, the def is dead.
1619     if (llvm::none_of(UsedRegs,
1620                       [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
1621       MO.setIsDead();
1622   }
1623 
1624   // This is a call with a register mask operand.
1625   // Mask clobbers are always dead, so add defs for the non-dead defines.
1626   if (HasRegMask)
1627     for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
1628          I != E; ++I)
1629       addRegisterDefined(*I, &TRI);
1630 }
1631 
1632 unsigned
1633 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
1634   // Build up a buffer of hash code components.
1635   SmallVector<size_t, 8> HashComponents;
1636   HashComponents.reserve(MI->getNumOperands() + 1);
1637   HashComponents.push_back(MI->getOpcode());
1638   for (const MachineOperand &MO : MI->operands()) {
1639     if (MO.isReg() && MO.isDef() &&
1640         TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1641       continue;  // Skip virtual register defs.
1642 
1643     HashComponents.push_back(hash_value(MO));
1644   }
1645   return hash_combine_range(HashComponents.begin(), HashComponents.end());
1646 }
1647 
1648 void MachineInstr::emitError(StringRef Msg) const {
1649   // Find the source location cookie.
1650   unsigned LocCookie = 0;
1651   const MDNode *LocMD = nullptr;
1652   for (unsigned i = getNumOperands(); i != 0; --i) {
1653     if (getOperand(i-1).isMetadata() &&
1654         (LocMD = getOperand(i-1).getMetadata()) &&
1655         LocMD->getNumOperands() != 0) {
1656       if (const ConstantInt *CI =
1657               mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
1658         LocCookie = CI->getZExtValue();
1659         break;
1660       }
1661     }
1662   }
1663 
1664   if (const MachineBasicBlock *MBB = getParent())
1665     if (const MachineFunction *MF = MBB->getParent())
1666       return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
1667   report_fatal_error(Msg);
1668 }
1669 
1670 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1671                                   const MCInstrDesc &MCID, bool IsIndirect,
1672                                   unsigned Reg, const MDNode *Variable,
1673                                   const MDNode *Expr) {
1674   assert(isa<DILocalVariable>(Variable) && "not a variable");
1675   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1676   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1677          "Expected inlined-at fields to agree");
1678   if (IsIndirect)
1679     return BuildMI(MF, DL, MCID)
1680         .addReg(Reg, RegState::Debug)
1681         .addImm(0U)
1682         .addMetadata(Variable)
1683         .addMetadata(Expr);
1684   else
1685     return BuildMI(MF, DL, MCID)
1686         .addReg(Reg, RegState::Debug)
1687         .addReg(0U, RegState::Debug)
1688         .addMetadata(Variable)
1689         .addMetadata(Expr);
1690 }
1691 
1692 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1693                                   MachineBasicBlock::iterator I,
1694                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1695                                   bool IsIndirect, unsigned Reg,
1696                                   const MDNode *Variable, const MDNode *Expr) {
1697   assert(isa<DILocalVariable>(Variable) && "not a variable");
1698   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1699   MachineFunction &MF = *BB.getParent();
1700   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
1701   BB.insert(I, MI);
1702   return MachineInstrBuilder(MF, MI);
1703 }
1704 
1705 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
1706 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
1707 static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
1708   assert(MI.getOperand(0).isReg() && "can't spill non-register");
1709   assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
1710          "Expected inlined-at fields to agree");
1711 
1712   const DIExpression *Expr = MI.getDebugExpression();
1713   if (MI.isIndirectDebugValue()) {
1714     assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
1715     Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
1716   }
1717   return Expr;
1718 }
1719 
1720 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
1721                                           MachineBasicBlock::iterator I,
1722                                           const MachineInstr &Orig,
1723                                           int FrameIndex) {
1724   const DIExpression *Expr = computeExprForSpill(Orig);
1725   return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
1726       .addFrameIndex(FrameIndex)
1727       .addImm(0U)
1728       .addMetadata(Orig.getDebugVariable())
1729       .addMetadata(Expr);
1730 }
1731 
1732 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
1733   const DIExpression *Expr = computeExprForSpill(Orig);
1734   Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
1735   Orig.getOperand(1).ChangeToImmediate(0U);
1736   Orig.getOperand(3).setMetadata(Expr);
1737 }
1738