1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Methods common to all machine instructions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/MachineInstr.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/FoldingSet.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallString.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/Loads.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineInstrBundle.h"
32 #include "llvm/CodeGen/MachineMemOperand.h"
33 #include "llvm/CodeGen/MachineModuleInfo.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/PseudoSourceValue.h"
37 #include "llvm/CodeGen/TargetInstrInfo.h"
38 #include "llvm/CodeGen/TargetRegisterInfo.h"
39 #include "llvm/CodeGen/TargetSubtargetInfo.h"
40 #include "llvm/Config/llvm-config.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Intrinsics.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/ModuleSlotTracker.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/MC/MCInstrDesc.h"
56 #include "llvm/MC/MCRegisterInfo.h"
57 #include "llvm/MC/MCSymbol.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/LowLevelTypeImpl.h"
64 #include "llvm/Support/MathExtras.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Target/TargetIntrinsicInfo.h"
67 #include "llvm/Target/TargetMachine.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <cstring>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 
78 static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
79   if (const MachineBasicBlock *MBB = MI.getParent())
80     if (const MachineFunction *MF = MBB->getParent())
81       return MF;
82   return nullptr;
83 }
84 
85 // Try to crawl up to the machine function and get TRI and IntrinsicInfo from
86 // it.
87 static void tryToGetTargetInfo(const MachineInstr &MI,
88                                const TargetRegisterInfo *&TRI,
89                                const MachineRegisterInfo *&MRI,
90                                const TargetIntrinsicInfo *&IntrinsicInfo,
91                                const TargetInstrInfo *&TII) {
92 
93   if (const MachineFunction *MF = getMFIfAvailable(MI)) {
94     TRI = MF->getSubtarget().getRegisterInfo();
95     MRI = &MF->getRegInfo();
96     IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
97     TII = MF->getSubtarget().getInstrInfo();
98   }
99 }
100 
101 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
102   if (MCID->ImplicitDefs)
103     for (const MCPhysReg *ImpDefs = MCID->getImplicitDefs(); *ImpDefs;
104            ++ImpDefs)
105       addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
106   if (MCID->ImplicitUses)
107     for (const MCPhysReg *ImpUses = MCID->getImplicitUses(); *ImpUses;
108            ++ImpUses)
109       addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
110 }
111 
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
                           DebugLoc dl, bool NoImp)
    : MCID(&tid), debugLoc(std::move(dl)) {
  // The DebugLoc must be trivially destructible — presumably because
  // MachineInstr storage is allocated/recycled by the function without
  // running destructors. TODO confirm against MachineFunction's allocator.
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands: the fixed explicit
  // operands plus all implicit defs and uses from the descriptor.
  if (unsigned NumOps = MCID->getNumOperands() +
    MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  // NoImp lets callers build the instruction without the descriptor's
  // implicit def/use operands (they can add them manually later).
  if (!NoImp)
    addImplicitDefUseOperands(MF);
}
130 
/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Duplicates the descriptor pointer, extra info, debug location, all
/// operands, and the instruction flags into a fresh instruction owned by MF.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Size the operand array exactly for the source's operand count.
  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}
147 
148 /// getRegInfo - If this instruction is embedded into a MachineFunction,
149 /// return the MachineRegisterInfo object for the current function, otherwise
150 /// return null.
151 MachineRegisterInfo *MachineInstr::getRegInfo() {
152   if (MachineBasicBlock *MBB = getParent())
153     return &MBB->getParent()->getRegInfo();
154   return nullptr;
155 }
156 
157 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
158 /// this instruction from their respective use lists.  This requires that the
159 /// operands already be on their use lists.
160 void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
161   for (MachineOperand &MO : operands())
162     if (MO.isReg())
163       MRI.removeRegOperandFromUseList(&MO);
164 }
165 
166 /// AddRegOperandsToUseLists - Add all of the register operands in
167 /// this instruction from their respective use lists.  This requires that the
168 /// operands not be on their use lists yet.
169 void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
170   for (MachineOperand &MO : operands())
171     if (MO.isReg())
172       MRI.addRegOperandToUseList(&MO);
173 }
174 
/// Convenience overload: adds an operand to an instruction that is already
/// embedded in a function, by delegating to the MachineFunction-aware
/// overload. Asserts if the instruction is dangling.
void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  addOperand(*MF, Op);
}
182 
183 /// Move NumOps MachineOperands from Src to Dst, with support for overlapping
184 /// ranges. If MRI is non-null also update use-def chains.
185 static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
186                          unsigned NumOps, MachineRegisterInfo *MRI) {
187   if (MRI)
188     return MRI->moveOperands(Dst, Src, NumOps);
189 
190   // MachineOperand is a trivially copyable type so we can just use memmove.
191   std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
192 }
193 
/// addOperand - Add the specified operand to the instruction.  If it is an
/// implicit operand, it is added to the end of the operand list.  If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand.  Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around.  See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk backwards over the trailing implicit register operands; the new
    // explicit operand will be inserted just before them.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

#ifndef NDEBUG
  bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
  // OpNo now points as the desired insertion point.  Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
          OpNo < MCID->getNumOperands() || isMetaDataOp) &&
         "Trying to add an operand to a machine instr that is already done!");
#endif

  // May be null if the instruction is not yet inserted into a function; in
  // that case no use-list maintenance happens below.
  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  // Note: when no reallocation happened, Operands == OldOperands and this
  // shifts the tail one slot right in place (moveOperands handles overlap).
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
  }
}
289 
/// RemoveOperand - Erase an operand  from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  // Break any tie involving this operand before it is removed.
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  // Unlink a register operand from its use list before its slot is reused.
  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Shift the tail of the operand array down over the removed slot.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
316 
/// Drop all memory operands from this instruction, preserving any pre-/post-
/// instruction symbols stored alongside them in the extra info.
void MachineInstr::dropMemRefs(MachineFunction &MF) {
  if (memoperands_empty())
    return;

  // See if we can just drop all of our extra info.
  if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
    Info.clear();
    return;
  }
  // A single remaining symbol can be stored inline in Info.
  if (!getPostInstrSymbol()) {
    Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
    return;
  }
  if (!getPreInstrSymbol()) {
    Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
    return;
  }

  // Otherwise allocate a fresh extra info with just these symbols.
  Info.set<EIIK_OutOfLine>(
      MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
}
339 
340 void MachineInstr::setMemRefs(MachineFunction &MF,
341                               ArrayRef<MachineMemOperand *> MMOs) {
342   if (MMOs.empty()) {
343     dropMemRefs(MF);
344     return;
345   }
346 
347   // Try to store a single MMO inline.
348   if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
349     Info.set<EIIK_MMO>(MMOs[0]);
350     return;
351   }
352 
353   // Otherwise create an extra info struct with all of our info.
354   Info.set<EIIK_OutOfLine>(
355       MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
356 }
357 
358 void MachineInstr::addMemOperand(MachineFunction &MF,
359                                  MachineMemOperand *MO) {
360   SmallVector<MachineMemOperand *, 2> MMOs;
361   MMOs.append(memoperands_begin(), memoperands_end());
362   MMOs.push_back(MO);
363   setMemRefs(MF, MMOs);
364 }
365 
366 void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
367   if (this == &MI)
368     // Nothing to do for a self-clone!
369     return;
370 
371   assert(&MF == MI.getMF() &&
372          "Invalid machine functions when cloning memory refrences!");
373   // See if we can just steal the extra info already allocated for the
374   // instruction. We can do this whenever the pre- and post-instruction symbols
375   // are the same (including null).
376   if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
377       getPostInstrSymbol() == MI.getPostInstrSymbol()) {
378     Info = MI.Info;
379     return;
380   }
381 
382   // Otherwise, fall back on a copy-based clone.
383   setMemRefs(MF, MI.memoperands());
384 }
385 
386 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
387 /// identical.
388 static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
389                              ArrayRef<MachineMemOperand *> RHS) {
390   if (LHS.size() != RHS.size())
391     return false;
392 
393   auto LHSPointees = make_pointee_range(LHS);
394   auto RHSPointees = make_pointee_range(RHS);
395   return std::equal(LHSPointees.begin(), LHSPointees.end(),
396                     RHSPointees.begin());
397 }
398 
/// Install on this instruction the merge of the memory references of all MIs.
/// An empty memoperand list on any input means "may access anything" and
/// forces the merged result to be empty as well.
void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the only
  // way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}
449 
/// Set (or clear, when Symbol is null) the pre-instruction symbol, choosing
/// the most compact storage for the remaining extra info.
void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  MCSymbol *OldSymbol = getPreInstrSymbol();
  if (OldSymbol == Symbol)
    return;
  if (OldSymbol && !Symbol) {
    // We're removing a symbol rather than adding one. Try to clean up any
    // extra info carried around.
    if (Info.is<EIIK_PreInstrSymbol>()) {
      Info.clear();
      return;
    }

    if (memoperands_empty()) {
      assert(getPostInstrSymbol() &&
             "Should never have only a single symbol allocated out-of-line!");
      Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
      return;
    }

    // Otherwise fallback on the generic update.
  } else if (!Info || Info.is<EIIK_PreInstrSymbol>()) {
    // If we don't have any other extra info, we can store this inline.
    Info.set<EIIK_PreInstrSymbol>(Symbol);
    return;
  }

  // Otherwise, allocate a full new set of extra info.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  Info.set<EIIK_OutOfLine>(
      MF.createMIExtraInfo(memoperands(), Symbol, getPostInstrSymbol()));
}
481 
/// Set (or clear, when Symbol is null) the post-instruction symbol; mirror
/// image of setPreInstrSymbol above.
void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  MCSymbol *OldSymbol = getPostInstrSymbol();
  if (OldSymbol == Symbol)
    return;
  if (OldSymbol && !Symbol) {
    // We're removing a symbol rather than adding one. Try to clean up any
    // extra info carried around.
    if (Info.is<EIIK_PostInstrSymbol>()) {
      Info.clear();
      return;
    }

    if (memoperands_empty()) {
      assert(getPreInstrSymbol() &&
             "Should never have only a single symbol allocated out-of-line!");
      Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
      return;
    }

    // Otherwise fallback on the generic update.
  } else if (!Info || Info.is<EIIK_PostInstrSymbol>()) {
    // If we don't have any other extra info, we can store this inline.
    Info.set<EIIK_PostInstrSymbol>(Symbol);
    return;
  }

  // Otherwise, allocate a full new set of extra info.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  Info.set<EIIK_OutOfLine>(
      MF.createMIExtraInfo(memoperands(), getPreInstrSymbol(), Symbol));
}
513 
514 uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
515   // For now, the just return the union of the flags. If the flags get more
516   // complicated over time, we might need more logic here.
517   return getFlags() | Other.getFlags();
518 }
519 
/// Walk every instruction in the bundle headed by this instruction and test
/// the descriptor flags in Mask. With AnyInBundle, return true as soon as one
/// member matches; with AllInBundle, return false as soon as a non-BUNDLE
/// member fails to match.
bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      // BUNDLE headers themselves are exempt from the AllInBundle check.
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}
535 
/// Compare this instruction with Other for equality of opcode, operands, and
/// (for bundles) every bundled instruction. The Check mode controls how
/// register defs and kill/dead flags participate in the comparison.
bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we analyzed the last instruction inside at least one of the
    // bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        // Only physical-register defs are compared in this mode.
        if (!TargetRegisterInfo::isVirtualRegister(MO.getReg()) ||
            !TargetRegisterInfo::isVirtualRegister(OMO.getReg()))
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  return true;
}
606 
607 const MachineFunction *MachineInstr::getMF() const {
608   return getParent()->getParent();
609 }
610 
611 MachineInstr *MachineInstr::removeFromParent() {
612   assert(getParent() && "Not embedded in a basic block!");
613   return getParent()->remove(this);
614 }
615 
616 MachineInstr *MachineInstr::removeFromBundle() {
617   assert(getParent() && "Not embedded in a basic block!");
618   return getParent()->remove_instr(this);
619 }
620 
621 void MachineInstr::eraseFromParent() {
622   assert(getParent() && "Not embedded in a basic block!");
623   getParent()->erase(this);
624 }
625 
626 void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
627   assert(getParent() && "Not embedded in a basic block!");
628   MachineBasicBlock *MBB = getParent();
629   MachineFunction *MF = MBB->getParent();
630   assert(MF && "Not embedded in a function!");
631 
632   MachineInstr *MI = (MachineInstr *)this;
633   MachineRegisterInfo &MRI = MF->getRegInfo();
634 
635   for (const MachineOperand &MO : MI->operands()) {
636     if (!MO.isReg() || !MO.isDef())
637       continue;
638     unsigned Reg = MO.getReg();
639     if (!TargetRegisterInfo::isVirtualRegister(Reg))
640       continue;
641     MRI.markUsesInDebugValueAsUndef(Reg);
642   }
643   MI->eraseFromParent();
644 }
645 
646 void MachineInstr::eraseFromBundle() {
647   assert(getParent() && "Not embedded in a basic block!");
648   getParent()->erase_instr(this);
649 }
650 
651 unsigned MachineInstr::getNumExplicitOperands() const {
652   unsigned NumOperands = MCID->getNumOperands();
653   if (!MCID->isVariadic())
654     return NumOperands;
655 
656   for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
657     const MachineOperand &MO = getOperand(I);
658     // The operands must always be in the following order:
659     // - explicit reg defs,
660     // - other explicit operands (reg uses, immediates, etc.),
661     // - implicit reg defs
662     // - implicit reg uses
663     if (MO.isReg() && MO.isImplicit())
664       break;
665     ++NumOperands;
666   }
667   return NumOperands;
668 }
669 
670 unsigned MachineInstr::getNumExplicitDefs() const {
671   unsigned NumDefs = MCID->getNumDefs();
672   if (!MCID->isVariadic())
673     return NumDefs;
674 
675   for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
676     const MachineOperand &MO = getOperand(I);
677     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
678       break;
679     ++NumDefs;
680   }
681   return NumDefs;
682 }
683 
684 void MachineInstr::bundleWithPred() {
685   assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
686   setFlag(BundledPred);
687   MachineBasicBlock::instr_iterator Pred = getIterator();
688   --Pred;
689   assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
690   Pred->setFlag(BundledSucc);
691 }
692 
693 void MachineInstr::bundleWithSucc() {
694   assert(!isBundledWithSucc() && "MI is already bundled with its successor");
695   setFlag(BundledSucc);
696   MachineBasicBlock::instr_iterator Succ = getIterator();
697   ++Succ;
698   assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
699   Succ->setFlag(BundledPred);
700 }
701 
702 void MachineInstr::unbundleFromPred() {
703   assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
704   clearFlag(BundledPred);
705   MachineBasicBlock::instr_iterator Pred = getIterator();
706   --Pred;
707   assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
708   Pred->clearFlag(BundledSucc);
709 }
710 
711 void MachineInstr::unbundleFromSucc() {
712   assert(isBundledWithSucc() && "MI isn't bundled with its successor");
713   clearFlag(BundledSucc);
714   MachineBasicBlock::instr_iterator Succ = getIterator();
715   ++Succ;
716   assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
717   Succ->clearFlag(BundledPred);
718 }
719 
720 bool MachineInstr::isStackAligningInlineAsm() const {
721   if (isInlineAsm()) {
722     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
723     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
724       return true;
725   }
726   return false;
727 }
728 
729 InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
730   assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
731   unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
732   return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
733 }
734 
/// For an inline asm instruction, find the index of the flag-word operand
/// that governs operand OpIdx, scanning operand groups from the first real
/// operand. Optionally reports the zero-based group number via GroupNo.
/// Returns -1 for the fixed leading operands or trailing implicit registers.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  // Each group is one immediate flag word followed by its register operands;
  // advance a whole group per iteration.
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
762 
763 const DILabel *MachineInstr::getDebugLabel() const {
764   assert(isDebugLabel() && "not a DBG_LABEL");
765   return cast<DILabel>(getOperand(0).getMetadata());
766 }
767 
768 const DILocalVariable *MachineInstr::getDebugVariable() const {
769   assert(isDebugValue() && "not a DBG_VALUE");
770   return cast<DILocalVariable>(getOperand(2).getMetadata());
771 }
772 
773 const DIExpression *MachineInstr::getDebugExpression() const {
774   assert(isDebugValue() && "not a DBG_VALUE");
775   return cast<DIExpression>(getOperand(3).getMetadata());
776 }
777 
/// Return the register class constrained onto operand OpIdx, or null when
/// there is no constraint. For normal instructions the constraint comes from
/// the MCInstrDesc; for inline asm it is decoded from the flag word of the
/// operand's group.
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  // Only register-kind operand groups can carry an explicit class constraint.
  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}
817 
818 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
819     unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
820     const TargetRegisterInfo *TRI, bool ExploreBundle) const {
821   // Check every operands inside the bundle if we have
822   // been asked to.
823   if (ExploreBundle)
824     for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
825          ++OpndIt)
826       CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
827           OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
828   else
829     // Otherwise, just check the current operands.
830     for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
831       CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
832   return CurRC;
833 }
834 
835 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
836     unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
837     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
838   assert(CurRC && "Invalid initial register class");
839   // Check if Reg is constrained by some of its use/def from MI.
840   const MachineOperand &MO = getOperand(OpIdx);
841   if (!MO.isReg() || MO.getReg() != Reg)
842     return CurRC;
843   // If yes, accumulate the constraints through the operand.
844   return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
845 }
846 
847 const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
848     unsigned OpIdx, const TargetRegisterClass *CurRC,
849     const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
850   const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
851   const MachineOperand &MO = getOperand(OpIdx);
852   assert(MO.isReg() &&
853          "Cannot get register constraints for non-register operand");
854   assert(CurRC && "Invalid initial register class");
855   if (unsigned SubIdx = MO.getSubReg()) {
856     if (OpRC)
857       CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
858     else
859       CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
860   } else if (OpRC)
861     CurRC = TRI->getCommonSubClass(CurRC, OpRC);
862   return CurRC;
863 }
864 
865 /// Return the number of instructions inside the MI bundle, not counting the
866 /// header instruction.
867 unsigned MachineInstr::getBundleSize() const {
868   MachineBasicBlock::const_instr_iterator I = getIterator();
869   unsigned Size = 0;
870   while (I->isBundledWithSucc()) {
871     ++Size;
872     ++I;
873   }
874   return Size;
875 }
876 
877 /// Returns true if the MachineInstr has an implicit-use operand of exactly
878 /// the given register (not considering sub/super-registers).
879 bool MachineInstr::hasRegisterImplicitUseOperand(unsigned Reg) const {
880   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
881     const MachineOperand &MO = getOperand(i);
882     if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == Reg)
883       return true;
884   }
885   return false;
886 }
887 
888 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
889 /// the specific register or -1 if it is not found. It further tightens
890 /// the search criteria to a use that kills the register if isKill is true.
891 int MachineInstr::findRegisterUseOperandIdx(
892     unsigned Reg, bool isKill, const TargetRegisterInfo *TRI) const {
893   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
894     const MachineOperand &MO = getOperand(i);
895     if (!MO.isReg() || !MO.isUse())
896       continue;
897     unsigned MOReg = MO.getReg();
898     if (!MOReg)
899       continue;
900     if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
901                          TargetRegisterInfo::isPhysicalRegister(Reg) &&
902                          TRI->isSubRegister(MOReg, Reg)))
903       if (!isKill || MO.isKill())
904         return i;
905   }
906   return -1;
907 }
908 
909 /// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
910 /// indicating if this instruction reads or writes Reg. This also considers
911 /// partial defines.
912 std::pair<bool,bool>
913 MachineInstr::readsWritesVirtualRegister(unsigned Reg,
914                                          SmallVectorImpl<unsigned> *Ops) const {
915   bool PartDef = false; // Partial redefine.
916   bool FullDef = false; // Full define.
917   bool Use = false;
918 
919   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
920     const MachineOperand &MO = getOperand(i);
921     if (!MO.isReg() || MO.getReg() != Reg)
922       continue;
923     if (Ops)
924       Ops->push_back(i);
925     if (MO.isUse())
926       Use |= !MO.isUndef();
927     else if (MO.getSubReg() && !MO.isUndef())
928       // A partial def undef doesn't count as reading the register.
929       PartDef = true;
930     else
931       FullDef = true;
932   }
933   // A partial redefine uses Reg unless there is also a full define.
934   return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
935 }
936 
937 /// findRegisterDefOperandIdx() - Returns the operand index that is a def of
938 /// the specified register or -1 if it is not found. If isDead is true, defs
939 /// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
940 /// also checks if there is a def of a super-register.
941 int
942 MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
943                                         const TargetRegisterInfo *TRI) const {
944   bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
945   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
946     const MachineOperand &MO = getOperand(i);
947     // Accept regmask operands when Overlap is set.
948     // Ignore them when looking for a specific def operand (Overlap == false).
949     if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
950       return i;
951     if (!MO.isReg() || !MO.isDef())
952       continue;
953     unsigned MOReg = MO.getReg();
954     bool Found = (MOReg == Reg);
955     if (!Found && TRI && isPhys &&
956         TargetRegisterInfo::isPhysicalRegister(MOReg)) {
957       if (Overlap)
958         Found = TRI->regsOverlap(MOReg, Reg);
959       else
960         Found = TRI->isSubRegister(MOReg, Reg);
961     }
962     if (Found && (!isDead || MO.isDead()))
963       return i;
964   }
965   return -1;
966 }
967 
968 /// findFirstPredOperandIdx() - Find the index of the first operand in the
969 /// operand list that is used to represent the predicate. It returns -1 if
970 /// none is found.
971 int MachineInstr::findFirstPredOperandIdx() const {
972   // Don't call MCID.findFirstPredOperandIdx() because this variant
973   // is sometimes called on an instruction that's not yet complete, and
974   // so the number of operands is less than the MCID indicates. In
975   // particular, the PTX target does this.
976   const MCInstrDesc &MCID = getDesc();
977   if (MCID.isPredicable()) {
978     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
979       if (MCID.OpInfo[i].isPredicate())
980         return i;
981   }
982 
983   return -1;
984 }
985 
// MachineOperand::TiedTo is 4 bits wide, so the largest encodable value is 15.
// constexpr guarantees a compile-time constant (the file already uses C++11+).
constexpr unsigned TiedMax = 15;
988 
989 /// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
990 ///
991 /// Use and def operands can be tied together, indicated by a non-zero TiedTo
992 /// field. TiedTo can have these values:
993 ///
994 /// 0:              Operand is not tied to anything.
995 /// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
996 /// TiedMax:        Tied to an operand >= TiedMax-1.
997 ///
998 /// The tied def must be one of the first TiedMax operands on a normal
999 /// instruction. INLINEASM instructions allow more tied defs.
1000 ///
1001 void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1002   MachineOperand &DefMO = getOperand(DefIdx);
1003   MachineOperand &UseMO = getOperand(UseIdx);
1004   assert(DefMO.isDef() && "DefIdx must be a def operand");
1005   assert(UseMO.isUse() && "UseIdx must be a use operand");
1006   assert(!DefMO.isTied() && "Def is already tied to another use");
1007   assert(!UseMO.isTied() && "Use is already tied to another def");
1008 
1009   if (DefIdx < TiedMax)
1010     UseMO.TiedTo = DefIdx + 1;
1011   else {
1012     // Inline asm can use the group descriptors to find tied operands, but on
1013     // normal instruction, the tied def must be within the first TiedMax
1014     // operands.
1015     assert(isInlineAsm() && "DefIdx out of range");
1016     UseMO.TiedTo = TiedMax;
1017   }
1018 
1019   // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1020   DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1021 }
1022 
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    // Its index must be >= TiedMax-1, otherwise TiedTo would be in range.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    // Each group is the flag operand plus its registers.
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    // (OpIdx == i would be the flag itself, which is never tied.)
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    // Tied operands occupy the same position within their groups, so shifting
    // by the inter-group distance maps one onto the other.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1080 
1081 /// clearKillInfo - Clears kill flags on all operands.
1082 ///
1083 void MachineInstr::clearKillInfo() {
1084   for (MachineOperand &MO : operands()) {
1085     if (MO.isReg() && MO.isUse())
1086       MO.setIsKill(false);
1087   }
1088 }
1089 
1090 void MachineInstr::substituteRegister(unsigned FromReg, unsigned ToReg,
1091                                       unsigned SubIdx,
1092                                       const TargetRegisterInfo &RegInfo) {
1093   if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
1094     if (SubIdx)
1095       ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1096     for (MachineOperand &MO : operands()) {
1097       if (!MO.isReg() || MO.getReg() != FromReg)
1098         continue;
1099       MO.substPhysReg(ToReg, RegInfo);
1100     }
1101   } else {
1102     for (MachineOperand &MO : operands()) {
1103       if (!MO.isReg() || MO.getReg() != FromReg)
1104         continue;
1105       MO.substVirtReg(ToReg, SubIdx, RegInfo);
1106     }
1107   }
1108 }
1109 
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
/// Note: SawStore is in/out — it is also set to true here when this
/// instruction itself acts as a store barrier.
bool MachineInstr::isSafeToMove(AliasAnalysis *AA, bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  // Labels, debug instructions, terminators and instructions with unknown
  // side effects are pinned in place.
  if (isPosition() || isDebugInstr() || isTerminator() ||
      hasUnmodeledSideEffects())
    return false;

  // See if this instruction does a load.  If so, we have to guarantee that the
  // loaded value doesn't change between the load and the its intended
  // destination. The check for isInvariantLoad gives the targe the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  if (mayLoad() && !isDereferenceableInvariantLoad(AA))
    // Otherwise, this is a real load.  If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1141 
/// Conservatively returns true if this instruction's memory access may alias
/// Other's. Returns false only when the accesses are provably disjoint.
/// UseTBAA controls whether TBAA metadata is forwarded to the AA query.
bool MachineInstr::mayAlias(AliasAnalysis *AA, MachineInstr &Other,
                            bool UseTBAA) {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other, AA))
    return false;

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!hasOneMemOperand() || !Other.hasOneMemOperand())
    return true;

  MachineMemOperand *MMOa = *memoperands_begin();
  MachineMemOperand *MMOb = *Other.memoperands_begin();

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();

  int64_t MinOffset = std::min(OffsetA, OffsetB);
  int64_t WidthA = MMOa->getSize();
  int64_t WidthB = MMOb->getSize();
  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // A PseudoSourceValue (e.g. a stack slot) that cannot alias general IR
    // values proves disjointness against an IR-value-based access.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    // Same underlying object: alias iff the [offset, offset+width) ranges
    // overlap.
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  // AA needs IR values on both sides; without them, stay conservative.
  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // Query AA with sizes extended so both locations are measured from the
  // common MinOffset base.
  int64_t Overlapa = WidthA + OffsetA - MinOffset;
  int64_t Overlapb = WidthB + OffsetB - MinOffset;

  AliasResult AAResult = AA->alias(
      MemoryLocation(ValA, Overlapa,
                     UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, Overlapb,
                     UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));

  return (AAResult != NoAlias);
}
1226 
1227 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1228 /// or volatile memory reference, or if the information describing the memory
1229 /// reference is not available. Return false if it is known to have no ordered
1230 /// memory references.
1231 bool MachineInstr::hasOrderedMemoryRef() const {
1232   // An instruction known never to access memory won't have a volatile access.
1233   if (!mayStore() &&
1234       !mayLoad() &&
1235       !isCall() &&
1236       !hasUnmodeledSideEffects())
1237     return false;
1238 
1239   // Otherwise, if the instruction has no memory reference information,
1240   // conservatively assume it wasn't preserved.
1241   if (memoperands_empty())
1242     return true;
1243 
1244   // Check if any of our memory operands are ordered.
1245   return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1246     return !MMO->isUnordered();
1247   });
1248 }
1249 
1250 /// isDereferenceableInvariantLoad - Return true if this instruction will never
1251 /// trap and is loading from a location whose value is invariant across a run of
1252 /// this function.
1253 bool MachineInstr::isDereferenceableInvariantLoad(AliasAnalysis *AA) const {
1254   // If the instruction doesn't load at all, it isn't an invariant load.
1255   if (!mayLoad())
1256     return false;
1257 
1258   // If the instruction has lost its memoperands, conservatively assume that
1259   // it may not be an invariant load.
1260   if (memoperands_empty())
1261     return false;
1262 
1263   const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1264 
1265   for (MachineMemOperand *MMO : memoperands()) {
1266     if (MMO->isVolatile()) return false;
1267     if (MMO->isStore()) return false;
1268     if (MMO->isInvariant() && MMO->isDereferenceable())
1269       continue;
1270 
1271     // A load from a constant PseudoSourceValue is invariant.
1272     if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1273       if (PSV->isConstant(&MFI))
1274         continue;
1275 
1276     if (const Value *V = MMO->getValue()) {
1277       // If we have an AliasAnalysis, ask it whether the memory is constant.
1278       if (AA &&
1279           AA->pointsToConstantMemory(
1280               MemoryLocation(V, MMO->getSize(), MMO->getAAInfo())))
1281         continue;
1282     }
1283 
1284     // Otherwise assume conservatively.
1285     return false;
1286   }
1287 
1288   // Everything checks out.
1289   return true;
1290 }
1291 
1292 /// isConstantValuePHI - If the specified instruction is a PHI that always
1293 /// merges together the same virtual register, return the register, otherwise
1294 /// return 0.
1295 unsigned MachineInstr::isConstantValuePHI() const {
1296   if (!isPHI())
1297     return 0;
1298   assert(getNumOperands() >= 3 &&
1299          "It's illegal to have a PHI without source operands");
1300 
1301   unsigned Reg = getOperand(1).getReg();
1302   for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1303     if (getOperand(i).getReg() != Reg)
1304       return 0;
1305   return Reg;
1306 }
1307 
1308 bool MachineInstr::hasUnmodeledSideEffects() const {
1309   if (hasProperty(MCID::UnmodeledSideEffects))
1310     return true;
1311   if (isInlineAsm()) {
1312     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1313     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1314       return true;
1315   }
1316 
1317   return false;
1318 }
1319 
1320 bool MachineInstr::isLoadFoldBarrier() const {
1321   return mayStore() || isCall() || hasUnmodeledSideEffects();
1322 }
1323 
1324 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1325 ///
1326 bool MachineInstr::allDefsAreDead() const {
1327   for (const MachineOperand &MO : operands()) {
1328     if (!MO.isReg() || MO.isUse())
1329       continue;
1330     if (!MO.isDead())
1331       return false;
1332   }
1333   return true;
1334 }
1335 
1336 /// copyImplicitOps - Copy implicit register operands from specified
1337 /// instruction to this instruction.
1338 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1339                                    const MachineInstr &MI) {
1340   for (unsigned i = MI.getDesc().getNumOperands(), e = MI.getNumOperands();
1341        i != e; ++i) {
1342     const MachineOperand &MO = MI.getOperand(i);
1343     if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1344       addOperand(MF, MO);
1345   }
1346 }
1347 
1348 bool MachineInstr::hasComplexRegisterTies() const {
1349   const MCInstrDesc &MCID = getDesc();
1350   for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1351     const auto &Operand = getOperand(I);
1352     if (!Operand.isReg() || Operand.isDef())
1353       // Ignore the defined registers as MCID marks only the uses as tied.
1354       continue;
1355     int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1356     int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1357     if (ExpectedTiedIdx != TiedIdx)
1358       return true;
1359   }
1360   return false;
1361 }
1362 
1363 LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1364                                  const MachineRegisterInfo &MRI) const {
1365   const MachineOperand &Op = getOperand(OpIdx);
1366   if (!Op.isReg())
1367     return LLT{};
1368 
1369   if (isVariadic() || OpIdx >= getNumExplicitOperands())
1370     return MRI.getType(Op.getReg());
1371 
1372   auto &OpInfo = getDesc().OpInfo[OpIdx];
1373   if (!OpInfo.isGenericType())
1374     return MRI.getType(Op.getReg());
1375 
1376   if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1377     return LLT{};
1378 
1379   LLT TypeToPrint = MRI.getType(Op.getReg());
1380   // Don't mark the type index printed if it wasn't actually printed: maybe
1381   // another operand with the same type index has an actual type attached:
1382   if (TypeToPrint.isValid())
1383     PrintedTypes.set(OpInfo.getGenericTypeIndex());
1384   return TypeToPrint;
1385 }
1386 
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger aid: print this instruction to the debug stream with a two-space
// indent. Only built in !NDEBUG or LLVM_ENABLE_DUMP configurations.
LLVM_DUMP_METHOD void MachineInstr::dump() const {
  dbgs() << "  ";
  print(dbgs());
}
#endif
1393 
1394 void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1395                          bool SkipDebugLoc, bool AddNewLine,
1396                          const TargetInstrInfo *TII) const {
1397   const Module *M = nullptr;
1398   const Function *F = nullptr;
1399   if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1400     F = &MF->getFunction();
1401     M = F->getParent();
1402     if (!TII)
1403       TII = MF->getSubtarget().getInstrInfo();
1404   }
1405 
1406   ModuleSlotTracker MST(M);
1407   if (F)
1408     MST.incorporateFunction(*F);
1409   print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, TII);
1410 }
1411 
1412 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1413                          bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1414                          bool AddNewLine, const TargetInstrInfo *TII) const {
1415   // We can be a bit tidier if we know the MachineFunction.
1416   const MachineFunction *MF = nullptr;
1417   const TargetRegisterInfo *TRI = nullptr;
1418   const MachineRegisterInfo *MRI = nullptr;
1419   const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1420   tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1421 
1422   if (isCFIInstruction())
1423     assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1424 
1425   SmallBitVector PrintedTypes(8);
1426   bool ShouldPrintRegisterTies = hasComplexRegisterTies();
1427   auto getTiedOperandIdx = [&](unsigned OpIdx) {
1428     if (!ShouldPrintRegisterTies)
1429       return 0U;
1430     const MachineOperand &MO = getOperand(OpIdx);
1431     if (MO.isReg() && MO.isTied() && !MO.isDef())
1432       return findTiedOperandIdx(OpIdx);
1433     return 0U;
1434   };
1435   unsigned StartOp = 0;
1436   unsigned e = getNumOperands();
1437 
1438   // Print explicitly defined operands on the left of an assignment syntax.
1439   while (StartOp < e) {
1440     const MachineOperand &MO = getOperand(StartOp);
1441     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1442       break;
1443 
1444     if (StartOp != 0)
1445       OS << ", ";
1446 
1447     LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1448     unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1449     MO.print(OS, MST, TypeToPrint, /*PrintDef=*/false, IsStandalone,
1450              ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1451     ++StartOp;
1452   }
1453 
1454   if (StartOp != 0)
1455     OS << " = ";
1456 
1457   if (getFlag(MachineInstr::FrameSetup))
1458     OS << "frame-setup ";
1459   if (getFlag(MachineInstr::FrameDestroy))
1460     OS << "frame-destroy ";
1461   if (getFlag(MachineInstr::FmNoNans))
1462     OS << "nnan ";
1463   if (getFlag(MachineInstr::FmNoInfs))
1464     OS << "ninf ";
1465   if (getFlag(MachineInstr::FmNsz))
1466     OS << "nsz ";
1467   if (getFlag(MachineInstr::FmArcp))
1468     OS << "arcp ";
1469   if (getFlag(MachineInstr::FmContract))
1470     OS << "contract ";
1471   if (getFlag(MachineInstr::FmAfn))
1472     OS << "afn ";
1473   if (getFlag(MachineInstr::FmReassoc))
1474     OS << "reassoc ";
1475 
1476   // Print the opcode name.
1477   if (TII)
1478     OS << TII->getName(getOpcode());
1479   else
1480     OS << "UNKNOWN";
1481 
1482   if (SkipOpers)
1483     return;
1484 
1485   // Print the rest of the operands.
1486   bool FirstOp = true;
1487   unsigned AsmDescOp = ~0u;
1488   unsigned AsmOpCount = 0;
1489 
1490   if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1491     // Print asm string.
1492     OS << " ";
1493     const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1494     LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1495     unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1496     getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1497                             ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1498                             IntrinsicInfo);
1499 
1500     // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1501     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1502     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1503       OS << " [sideeffect]";
1504     if (ExtraInfo & InlineAsm::Extra_MayLoad)
1505       OS << " [mayload]";
1506     if (ExtraInfo & InlineAsm::Extra_MayStore)
1507       OS << " [maystore]";
1508     if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1509       OS << " [isconvergent]";
1510     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1511       OS << " [alignstack]";
1512     if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1513       OS << " [attdialect]";
1514     if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1515       OS << " [inteldialect]";
1516 
1517     StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1518     FirstOp = false;
1519   }
1520 
1521   for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1522     const MachineOperand &MO = getOperand(i);
1523 
1524     if (FirstOp) FirstOp = false; else OS << ",";
1525     OS << " ";
1526 
1527     if (isDebugValue() && MO.isMetadata()) {
1528       // Pretty print DBG_VALUE instructions.
1529       auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1530       if (DIV && !DIV->getName().empty())
1531         OS << "!\"" << DIV->getName() << '\"';
1532       else {
1533         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1534         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1535         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1536                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1537       }
1538     } else if (isDebugLabel() && MO.isMetadata()) {
1539       // Pretty print DBG_LABEL instructions.
1540       auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1541       if (DIL && !DIL->getName().empty())
1542         OS << "\"" << DIL->getName() << '\"';
1543       else {
1544         LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1545         unsigned TiedOperandIdx = getTiedOperandIdx(i);
1546         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1547                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1548       }
1549     } else if (i == AsmDescOp && MO.isImm()) {
1550       // Pretty print the inline asm operand descriptor.
1551       OS << '$' << AsmOpCount++;
1552       unsigned Flag = MO.getImm();
1553       switch (InlineAsm::getKind(Flag)) {
1554       case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
1555       case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
1556       case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
1557       case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
1558       case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
1559       case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
1560       default: OS << ":[??" << InlineAsm::getKind(Flag); break;
1561       }
1562 
1563       unsigned RCID = 0;
1564       if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
1565           InlineAsm::hasRegClassConstraint(Flag, RCID)) {
1566         if (TRI) {
1567           OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1568         } else
1569           OS << ":RC" << RCID;
1570       }
1571 
1572       if (InlineAsm::isMemKind(Flag)) {
1573         unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
1574         switch (MCID) {
1575         case InlineAsm::Constraint_es: OS << ":es"; break;
1576         case InlineAsm::Constraint_i:  OS << ":i"; break;
1577         case InlineAsm::Constraint_m:  OS << ":m"; break;
1578         case InlineAsm::Constraint_o:  OS << ":o"; break;
1579         case InlineAsm::Constraint_v:  OS << ":v"; break;
1580         case InlineAsm::Constraint_Q:  OS << ":Q"; break;
1581         case InlineAsm::Constraint_R:  OS << ":R"; break;
1582         case InlineAsm::Constraint_S:  OS << ":S"; break;
1583         case InlineAsm::Constraint_T:  OS << ":T"; break;
1584         case InlineAsm::Constraint_Um: OS << ":Um"; break;
1585         case InlineAsm::Constraint_Un: OS << ":Un"; break;
1586         case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
1587         case InlineAsm::Constraint_Us: OS << ":Us"; break;
1588         case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
1589         case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
1590         case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
1591         case InlineAsm::Constraint_X:  OS << ":X"; break;
1592         case InlineAsm::Constraint_Z:  OS << ":Z"; break;
1593         case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
1594         case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
1595         default: OS << ":?"; break;
1596         }
1597       }
1598 
1599       unsigned TiedTo = 0;
1600       if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
1601         OS << " tiedto:$" << TiedTo;
1602 
1603       OS << ']';
1604 
1605       // Compute the index of the next operand descriptor.
1606       AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
1607     } else {
1608       LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1609       unsigned TiedOperandIdx = getTiedOperandIdx(i);
1610       if (MO.isImm() && isOperandSubregIdx(i))
1611         MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1612       else
1613         MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, IsStandalone,
1614                  ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1615     }
1616   }
1617 
1618   // Print any optional symbols attached to this instruction as-if they were
1619   // operands.
1620   if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1621     if (!FirstOp) {
1622       FirstOp = false;
1623       OS << ',';
1624     }
1625     OS << " pre-instr-symbol ";
1626     MachineOperand::printSymbol(OS, *PreInstrSymbol);
1627   }
1628   if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1629     if (!FirstOp) {
1630       FirstOp = false;
1631       OS << ',';
1632     }
1633     OS << " post-instr-symbol ";
1634     MachineOperand::printSymbol(OS, *PostInstrSymbol);
1635   }
1636 
1637   if (!SkipDebugLoc) {
1638     if (const DebugLoc &DL = getDebugLoc()) {
1639       if (!FirstOp)
1640         OS << ',';
1641       OS << " debug-location ";
1642       DL->printAsOperand(OS, MST);
1643     }
1644   }
1645 
1646   if (!memoperands_empty()) {
1647     SmallVector<StringRef, 0> SSNs;
1648     const LLVMContext *Context = nullptr;
1649     std::unique_ptr<LLVMContext> CtxPtr;
1650     const MachineFrameInfo *MFI = nullptr;
1651     if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1652       MFI = &MF->getFrameInfo();
1653       Context = &MF->getFunction().getContext();
1654     } else {
1655       CtxPtr = llvm::make_unique<LLVMContext>();
1656       Context = CtxPtr.get();
1657     }
1658 
1659     OS << " :: ";
1660     bool NeedComma = false;
1661     for (const MachineMemOperand *Op : memoperands()) {
1662       if (NeedComma)
1663         OS << ", ";
1664       Op->print(OS, MST, SSNs, *Context, MFI, TII);
1665       NeedComma = true;
1666     }
1667   }
1668 
1669   if (SkipDebugLoc)
1670     return;
1671 
1672   bool HaveSemi = false;
1673 
1674   // Print debug location information.
1675   if (const DebugLoc &DL = getDebugLoc()) {
1676     if (!HaveSemi) {
1677       OS << ';';
1678       HaveSemi = true;
1679     }
1680     OS << ' ';
1681     DL.print(OS);
1682   }
1683 
1684   // Print extra comments for DEBUG_VALUE.
1685   if (isDebugValue() && getOperand(e - 2).isMetadata()) {
1686     if (!HaveSemi) {
1687       OS << ";";
1688       HaveSemi = true;
1689     }
1690     auto *DV = cast<DILocalVariable>(getOperand(e - 2).getMetadata());
1691     OS << " line no:" <<  DV->getLine();
1692     if (auto *InlinedAt = debugLoc->getInlinedAt()) {
1693       DebugLoc InlinedAtDL(InlinedAt);
1694       if (InlinedAtDL && MF) {
1695         OS << " inlined @[ ";
1696         InlinedAtDL.print(OS);
1697         OS << " ]";
1698       }
1699     }
1700     if (isIndirectDebugValue())
1701       OS << " indirect";
1702   }
1703   // TODO: DBG_LABEL
1704 
1705   if (AddNewLine)
1706     OS << '\n';
1707 }
1708 
/// Mark all uses of IncomingReg in this instruction as killed. Returns true
/// if a kill flag is present (or was added); if no matching use exists and
/// AddIfNotFound is set, an implicit killed use is appended.
bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  // Alias bookkeeping only applies to physical registers that actually have
  // aliases in the target's register file.
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register kill operands that become redundant once
  // IncomingReg itself carries the kill flag; trimmed after the scan.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands. Iterate back-to-front so earlier indices
  // stay valid while operands are removed.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      // Explicit operands must stay in place; just clear the flag.
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
1774 
1775 void MachineInstr::clearRegisterKills(unsigned Reg,
1776                                       const TargetRegisterInfo *RegInfo) {
1777   if (!TargetRegisterInfo::isPhysicalRegister(Reg))
1778     RegInfo = nullptr;
1779   for (MachineOperand &MO : operands()) {
1780     if (!MO.isReg() || !MO.isUse() || !MO.isKill())
1781       continue;
1782     unsigned OpReg = MO.getReg();
1783     if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
1784       MO.setIsKill(false);
1785   }
1786 }
1787 
/// Mark all defs of Reg in this instruction as dead. Returns true if a dead
/// flag is present (or was added); if no matching def exists and
/// AddIfNotFound is set, an implicit dead def is appended.
bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  // Alias bookkeeping only applies to physical registers that actually have
  // aliases in the target's register file.
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register dead operands that become redundant once Reg
  // itself carries the dead flag; trimmed after the scan.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands. Iterate back-to-front so earlier indices
  // stay valid while operands are removed.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      // Explicit operands must stay in place; just clear the flag.
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
1839 
1840 void MachineInstr::clearRegisterDeads(unsigned Reg) {
1841   for (MachineOperand &MO : operands()) {
1842     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg)
1843       continue;
1844     MO.setIsDead(false);
1845   }
1846 }
1847 
1848 void MachineInstr::setRegisterDefReadUndef(unsigned Reg, bool IsUndef) {
1849   for (MachineOperand &MO : operands()) {
1850     if (!MO.isReg() || !MO.isDef() || MO.getReg() != Reg || MO.getSubReg() == 0)
1851       continue;
1852     MO.setIsUndef(IsUndef);
1853   }
1854 }
1855 
1856 void MachineInstr::addRegisterDefined(unsigned Reg,
1857                                       const TargetRegisterInfo *RegInfo) {
1858   if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1859     MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
1860     if (MO)
1861       return;
1862   } else {
1863     for (const MachineOperand &MO : operands()) {
1864       if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
1865           MO.getSubReg() == 0)
1866         return;
1867     }
1868   }
1869   addOperand(MachineOperand::CreateReg(Reg,
1870                                        true  /*IsDef*/,
1871                                        true  /*IsImp*/));
1872 }
1873 
1874 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
1875                                          const TargetRegisterInfo &TRI) {
1876   bool HasRegMask = false;
1877   for (MachineOperand &MO : operands()) {
1878     if (MO.isRegMask()) {
1879       HasRegMask = true;
1880       continue;
1881     }
1882     if (!MO.isReg() || !MO.isDef()) continue;
1883     unsigned Reg = MO.getReg();
1884     if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
1885     // If there are no uses, including partial uses, the def is dead.
1886     if (llvm::none_of(UsedRegs,
1887                       [&](unsigned Use) { return TRI.regsOverlap(Use, Reg); }))
1888       MO.setIsDead();
1889   }
1890 
1891   // This is a call with a register mask operand.
1892   // Mask clobbers are always dead, so add defs for the non-dead defines.
1893   if (HasRegMask)
1894     for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
1895          I != E; ++I)
1896       addRegisterDefined(*I, &TRI);
1897 }
1898 
1899 unsigned
1900 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
1901   // Build up a buffer of hash code components.
1902   SmallVector<size_t, 8> HashComponents;
1903   HashComponents.reserve(MI->getNumOperands() + 1);
1904   HashComponents.push_back(MI->getOpcode());
1905   for (const MachineOperand &MO : MI->operands()) {
1906     if (MO.isReg() && MO.isDef() &&
1907         TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1908       continue;  // Skip virtual register defs.
1909 
1910     HashComponents.push_back(hash_value(MO));
1911   }
1912   return hash_combine_range(HashComponents.begin(), HashComponents.end());
1913 }
1914 
1915 void MachineInstr::emitError(StringRef Msg) const {
1916   // Find the source location cookie.
1917   unsigned LocCookie = 0;
1918   const MDNode *LocMD = nullptr;
1919   for (unsigned i = getNumOperands(); i != 0; --i) {
1920     if (getOperand(i-1).isMetadata() &&
1921         (LocMD = getOperand(i-1).getMetadata()) &&
1922         LocMD->getNumOperands() != 0) {
1923       if (const ConstantInt *CI =
1924               mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
1925         LocCookie = CI->getZExtValue();
1926         break;
1927       }
1928     }
1929   }
1930 
1931   if (const MachineBasicBlock *MBB = getParent())
1932     if (const MachineFunction *MF = MBB->getParent())
1933       return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
1934   report_fatal_error(Msg);
1935 }
1936 
1937 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1938                                   const MCInstrDesc &MCID, bool IsIndirect,
1939                                   unsigned Reg, const MDNode *Variable,
1940                                   const MDNode *Expr) {
1941   assert(isa<DILocalVariable>(Variable) && "not a variable");
1942   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1943   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1944          "Expected inlined-at fields to agree");
1945   auto MIB = BuildMI(MF, DL, MCID).addReg(Reg, RegState::Debug);
1946   if (IsIndirect)
1947     MIB.addImm(0U);
1948   else
1949     MIB.addReg(0U, RegState::Debug);
1950   return MIB.addMetadata(Variable).addMetadata(Expr);
1951 }
1952 
1953 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
1954                                   const MCInstrDesc &MCID, bool IsIndirect,
1955                                   MachineOperand &MO, const MDNode *Variable,
1956                                   const MDNode *Expr) {
1957   assert(isa<DILocalVariable>(Variable) && "not a variable");
1958   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
1959   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
1960          "Expected inlined-at fields to agree");
1961   if (MO.isReg())
1962     return BuildMI(MF, DL, MCID, IsIndirect, MO.getReg(), Variable, Expr);
1963 
1964   auto MIB = BuildMI(MF, DL, MCID).add(MO);
1965   if (IsIndirect)
1966     MIB.addImm(0U);
1967   else
1968     MIB.addReg(0U, RegState::Debug);
1969   return MIB.addMetadata(Variable).addMetadata(Expr);
1970  }
1971 
1972 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1973                                   MachineBasicBlock::iterator I,
1974                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1975                                   bool IsIndirect, unsigned Reg,
1976                                   const MDNode *Variable, const MDNode *Expr) {
1977   MachineFunction &MF = *BB.getParent();
1978   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
1979   BB.insert(I, MI);
1980   return MachineInstrBuilder(MF, MI);
1981 }
1982 
1983 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
1984                                   MachineBasicBlock::iterator I,
1985                                   const DebugLoc &DL, const MCInstrDesc &MCID,
1986                                   bool IsIndirect, MachineOperand &MO,
1987                                   const MDNode *Variable, const MDNode *Expr) {
1988   MachineFunction &MF = *BB.getParent();
1989   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, MO, Variable, Expr);
1990   BB.insert(I, MI);
1991   return MachineInstrBuilder(MF, *MI);
1992 }
1993 
1994 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
1995 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
1996 static const DIExpression *computeExprForSpill(const MachineInstr &MI) {
1997   assert(MI.getOperand(0).isReg() && "can't spill non-register");
1998   assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
1999          "Expected inlined-at fields to agree");
2000 
2001   const DIExpression *Expr = MI.getDebugExpression();
2002   if (MI.isIndirectDebugValue()) {
2003     assert(MI.getOperand(1).getImm() == 0 && "DBG_VALUE with nonzero offset");
2004     Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
2005   }
2006   return Expr;
2007 }
2008 
2009 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2010                                           MachineBasicBlock::iterator I,
2011                                           const MachineInstr &Orig,
2012                                           int FrameIndex) {
2013   const DIExpression *Expr = computeExprForSpill(Orig);
2014   return BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc())
2015       .addFrameIndex(FrameIndex)
2016       .addImm(0U)
2017       .addMetadata(Orig.getDebugVariable())
2018       .addMetadata(Expr);
2019 }
2020 
/// Rewrite the DBG_VALUE Orig in place to refer to a spill slot: operand 0
/// becomes the frame index, operand 1 the (zero) offset, and operand 3 the
/// expression adjusted by computeExprForSpill for the added indirection.
void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex) {
  // Compute the new expression before mutating operands, since
  // computeExprForSpill reads Orig's current state.
  const DIExpression *Expr = computeExprForSpill(Orig);
  Orig.getOperand(0).ChangeToFrameIndex(FrameIndex);
  Orig.getOperand(1).ChangeToImmediate(0U);
  Orig.getOperand(3).setMetadata(Expr);
}
2027