1 //==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the Emit routines for the SelectionDAG class, which creates
11 // MachineInstrs based on the decisions of the SelectionDAG instruction
12 // selection.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "InstrEmitter.h"
17 #include "SDNodeDbgValue.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/CodeGen/MachineConstantPool.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/StackMaps.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DebugInfo.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetLowering.h"
31 #include "llvm/Target/TargetSubtargetInfo.h"
32 using namespace llvm;
33 
34 #define DEBUG_TYPE "instr-emitter"
35 
36 /// MinRCSize - Smallest register class we allow when constraining virtual
37 /// registers.  If satisfying all register class constraints would require
38 /// using a smaller register class, emit a COPY to a new virtual register
39 /// instead.
40 const unsigned MinRCSize = 4;
41 
42 /// CountResults - The results of target nodes have register or immediate
43 /// operands first, then an optional chain, and optional glue operands (which do
44 /// not go into the resulting MachineInstr).
45 unsigned InstrEmitter::CountResults(SDNode *Node) {
46   unsigned N = Node->getNumValues();
47   while (N && Node->getValueType(N - 1) == MVT::Glue)
48     --N;
49   if (N && Node->getValueType(N - 1) == MVT::Other)
50     --N;    // Skip over chain result.
51   return N;
52 }
53 
54 /// countOperands - The inputs to target nodes have any actual inputs first,
55 /// followed by an optional chain operand, then an optional glue operand.
56 /// Compute the number of actual operands that will go into the resulting
57 /// MachineInstr.
58 ///
59 /// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
60 /// the chain and glue. These operands may be implicit on the machine instr.
61 static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
62                               unsigned &NumImpUses) {
63   unsigned N = Node->getNumOperands();
64   while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
65     --N;
66   if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
67     --N; // Ignore chain if it exists.
68 
69   // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
70   NumImpUses = N - NumExpUses;
71   for (unsigned I = N; I > NumExpUses; --I) {
72     if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
73       continue;
74     if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
75       if (TargetRegisterInfo::isPhysicalRegister(RN->getReg()))
76         continue;
77     NumImpUses = N - I;
78     break;
79   }
80 
81   return N;
82 }
83 
84 /// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
85 /// implicit physical register output.
86 void InstrEmitter::
87 EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
88                 unsigned SrcReg, DenseMap<SDValue, unsigned> &VRBaseMap) {
89   unsigned VRBase = 0;
90   if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
91     // Just use the input register directly!
92     SDValue Op(Node, ResNo);
93     if (IsClone)
94       VRBaseMap.erase(Op);
95     bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
96     (void)isNew; // Silence compiler warning.
97     assert(isNew && "Node emitted out of order - early");
98     return;
99   }
100 
101   // If the node is only used by a CopyToReg and the dest reg is a vreg, use
102   // the CopyToReg'd destination register instead of creating a new vreg.
103   bool MatchReg = true;
104   const TargetRegisterClass *UseRC = nullptr;
105   MVT VT = Node->getSimpleValueType(ResNo);
106 
107   // Stick to the preferred register classes for legal types.
108   if (TLI->isTypeLegal(VT))
109     UseRC = TLI->getRegClassFor(VT);
110 
111   if (!IsClone && !IsCloned)
112     for (SDNode *User : Node->uses()) {
113       bool Match = true;
114       if (User->getOpcode() == ISD::CopyToReg &&
115           User->getOperand(2).getNode() == Node &&
116           User->getOperand(2).getResNo() == ResNo) {
117         unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
118         if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
119           VRBase = DestReg;
120           Match = false;
121         } else if (DestReg != SrcReg)
122           Match = false;
123       } else {
124         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
125           SDValue Op = User->getOperand(i);
126           if (Op.getNode() != Node || Op.getResNo() != ResNo)
127             continue;
128           MVT VT = Node->getSimpleValueType(Op.getResNo());
129           if (VT == MVT::Other || VT == MVT::Glue)
130             continue;
131           Match = false;
132           if (User->isMachineOpcode()) {
133             const MCInstrDesc &II = TII->get(User->getMachineOpcode());
134             const TargetRegisterClass *RC = nullptr;
135             if (i+II.getNumDefs() < II.getNumOperands()) {
136               RC = TRI->getAllocatableClass(
137                 TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
138             }
139             if (!UseRC)
140               UseRC = RC;
141             else if (RC) {
142               const TargetRegisterClass *ComRC =
143                 TRI->getCommonSubClass(UseRC, RC, VT.SimpleTy);
144               // If multiple uses expect disjoint register classes, we emit
145               // copies in AddRegisterOperand.
146               if (ComRC)
147                 UseRC = ComRC;
148             }
149           }
150         }
151       }
152       MatchReg &= Match;
153       if (VRBase)
154         break;
155     }
156 
157   const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
158   SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
159 
160   // Figure out the register class to create for the destreg.
161   if (VRBase) {
162     DstRC = MRI->getRegClass(VRBase);
163   } else if (UseRC) {
164     assert(UseRC->hasType(VT) && "Incompatible phys register def and uses!");
165     DstRC = UseRC;
166   } else {
167     DstRC = TLI->getRegClassFor(VT);
168   }
169 
170   // If all uses are reading from the src physical register and copying the
171   // register is either impossible or very expensive, then don't create a copy.
172   if (MatchReg && SrcRC->getCopyCost() < 0) {
173     VRBase = SrcReg;
174   } else {
175     // Create the reg, emit the copy.
176     VRBase = MRI->createVirtualRegister(DstRC);
177     BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
178             VRBase).addReg(SrcReg);
179   }
180 
181   SDValue Op(Node, ResNo);
182   if (IsClone)
183     VRBaseMap.erase(Op);
184   bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
185   (void)isNew; // Silence compiler warning.
186   assert(isNew && "Node emitted out of order - early");
187 }
188 
189 /// getDstOfCopyToRegUse - If the only use of the specified result number of
190 /// node is a CopyToReg, return its destination register. Return 0 otherwise.
191 unsigned InstrEmitter::getDstOfOnlyCopyToRegUse(SDNode *Node,
192                                                 unsigned ResNo) const {
193   if (!Node->hasOneUse())
194     return 0;
195 
196   SDNode *User = *Node->use_begin();
197   if (User->getOpcode() == ISD::CopyToReg &&
198       User->getOperand(2).getNode() == Node &&
199       User->getOperand(2).getResNo() == ResNo) {
200     unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
201     if (TargetRegisterInfo::isVirtualRegister(Reg))
202       return Reg;
203   }
204   return 0;
205 }
206 
207 void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
208                                        MachineInstrBuilder &MIB,
209                                        const MCInstrDesc &II,
210                                        bool IsClone, bool IsCloned,
211                                        DenseMap<SDValue, unsigned> &VRBaseMap) {
212   assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
213          "IMPLICIT_DEF should have been handled as a special case elsewhere!");
214 
215   unsigned NumResults = CountResults(Node);
216   for (unsigned i = 0; i < II.getNumDefs(); ++i) {
217     // If the specific node value is only used by a CopyToReg and the dest reg
218     // is a vreg in the same register class, use the CopyToReg'd destination
219     // register instead of creating a new vreg.
220     unsigned VRBase = 0;
221     const TargetRegisterClass *RC =
222       TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
223     // Always let the value type influence the used register class. The
224     // constraints on the instruction may be too lax to represent the value
225     // type correctly. For example, a 64-bit float (X86::FR64) can't live in
226     // the 32-bit float super-class (X86::FR32).
227     if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
228       const TargetRegisterClass *VTRC =
229         TLI->getRegClassFor(Node->getSimpleValueType(i));
230       if (RC)
231         VTRC = TRI->getCommonSubClass(RC, VTRC);
232       if (VTRC)
233         RC = VTRC;
234     }
235 
236     if (II.OpInfo[i].isOptionalDef()) {
237       // Optional def must be a physical register.
238       unsigned NumResults = CountResults(Node);
239       VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
240       assert(TargetRegisterInfo::isPhysicalRegister(VRBase));
241       MIB.addReg(VRBase, RegState::Define);
242     }
243 
244     if (!VRBase && !IsClone && !IsCloned)
245       for (SDNode *User : Node->uses()) {
246         if (User->getOpcode() == ISD::CopyToReg &&
247             User->getOperand(2).getNode() == Node &&
248             User->getOperand(2).getResNo() == i) {
249           unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
250           if (TargetRegisterInfo::isVirtualRegister(Reg)) {
251             const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
252             if (RegRC == RC) {
253               VRBase = Reg;
254               MIB.addReg(VRBase, RegState::Define);
255               break;
256             }
257           }
258         }
259       }
260 
261     // Create the result registers for this node and add the result regs to
262     // the machine instruction.
263     if (VRBase == 0) {
264       assert(RC && "Isn't a register operand!");
265       VRBase = MRI->createVirtualRegister(RC);
266       MIB.addReg(VRBase, RegState::Define);
267     }
268 
269     // If this def corresponds to a result of the SDNode insert the VRBase into
270     // the lookup map.
271     if (i < NumResults) {
272       SDValue Op(Node, i);
273       if (IsClone)
274         VRBaseMap.erase(Op);
275       bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
276       (void)isNew; // Silence compiler warning.
277       assert(isNew && "Node emitted out of order - early");
278     }
279   }
280 }
281 
282 /// getVR - Return the virtual register corresponding to the specified result
283 /// of the specified node.
284 unsigned InstrEmitter::getVR(SDValue Op,
285                              DenseMap<SDValue, unsigned> &VRBaseMap) {
286   if (Op.isMachineOpcode() &&
287       Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
288     // Add an IMPLICIT_DEF instruction before every use.
289     unsigned VReg = getDstOfOnlyCopyToRegUse(Op.getNode(), Op.getResNo());
290     // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
291     // does not include operand register class info.
292     if (!VReg) {
293       const TargetRegisterClass *RC =
294         TLI->getRegClassFor(Op.getSimpleValueType());
295       VReg = MRI->createVirtualRegister(RC);
296     }
297     BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
298             TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
299     return VReg;
300   }
301 
302   DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
303   assert(I != VRBaseMap.end() && "Node emitted out of order - late");
304   return I->second;
305 }
306 
307 
308 /// AddRegisterOperand - Add the specified register as an operand to the
309 /// specified machine instr. Insert register copies if the register is
310 /// not in the required register class.
311 void
312 InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
313                                  SDValue Op,
314                                  unsigned IIOpNum,
315                                  const MCInstrDesc *II,
316                                  DenseMap<SDValue, unsigned> &VRBaseMap,
317                                  bool IsDebug, bool IsClone, bool IsCloned) {
318   assert(Op.getValueType() != MVT::Other &&
319          Op.getValueType() != MVT::Glue &&
320          "Chain and glue operands should occur at end of operand list!");
321   // Get/emit the operand.
322   unsigned VReg = getVR(Op, VRBaseMap);
323   assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
324 
325   const MCInstrDesc &MCID = MIB->getDesc();
326   bool isOptDef = IIOpNum < MCID.getNumOperands() &&
327     MCID.OpInfo[IIOpNum].isOptionalDef();
328 
329   // If the instruction requires a register in a different class, create
330   // a new virtual register and copy the value into it, but first attempt to
331   // shrink VReg's register class within reason.  For example, if VReg == GR32
332   // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
333   if (II) {
334     const TargetRegisterClass *DstRC = nullptr;
335     if (IIOpNum < II->getNumOperands())
336       DstRC = TRI->getAllocatableClass(TII->getRegClass(*II,IIOpNum,TRI,*MF));
337     if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) {
338       unsigned NewVReg = MRI->createVirtualRegister(DstRC);
339       BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
340               TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
341       VReg = NewVReg;
342     }
343   }
344 
345   // If this value has only one use, that use is a kill. This is a
346   // conservative approximation. InstrEmitter does trivial coalescing
347   // with CopyFromReg nodes, so don't emit kill flags for them.
348   // Avoid kill flags on Schedule cloned nodes, since there will be
349   // multiple uses.
350   // Tied operands are never killed, so we need to check that. And that
351   // means we need to determine the index of the operand.
352   bool isKill = Op.hasOneUse() &&
353                 Op.getNode()->getOpcode() != ISD::CopyFromReg &&
354                 !IsDebug &&
355                 !(IsClone || IsCloned);
356   if (isKill) {
357     unsigned Idx = MIB->getNumOperands();
358     while (Idx > 0 &&
359            MIB->getOperand(Idx-1).isReg() &&
360            MIB->getOperand(Idx-1).isImplicit())
361       --Idx;
362     bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
363     if (isTied)
364       isKill = false;
365   }
366 
367   MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
368              getDebugRegState(IsDebug));
369 }
370 
371 /// AddOperand - Add the specified operand to the specified machine instr.  II
372 /// specifies the instruction information for the node, and IIOpNum is the
373 /// operand number (in the II) that we are adding.
374 void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
375                               SDValue Op,
376                               unsigned IIOpNum,
377                               const MCInstrDesc *II,
378                               DenseMap<SDValue, unsigned> &VRBaseMap,
379                               bool IsDebug, bool IsClone, bool IsCloned) {
380   if (Op.isMachineOpcode()) {
381     AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
382                        IsDebug, IsClone, IsCloned);
383   } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
384     MIB.addImm(C->getSExtValue());
385   } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
386     MIB.addFPImm(F->getConstantFPValue());
387   } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
388     // Turn additional physreg operands into implicit uses on non-variadic
389     // instructions. This is used by call and return instructions passing
390     // arguments in registers.
391     bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
392     MIB.addReg(R->getReg(), getImplRegState(Imp));
393   } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
394     MIB.addRegMask(RM->getRegMask());
395   } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
396     MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
397                          TGA->getTargetFlags());
398   } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
399     MIB.addMBB(BBNode->getBasicBlock());
400   } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
401     MIB.addFrameIndex(FI->getIndex());
402   } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
403     MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
404   } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
405     int Offset = CP->getOffset();
406     unsigned Align = CP->getAlignment();
407     Type *Type = CP->getType();
408     // MachineConstantPool wants an explicit alignment.
409     if (Align == 0) {
410       Align = MF->getDataLayout().getPrefTypeAlignment(Type);
411       if (Align == 0) {
412         // Alignment of vector types.  FIXME!
413         Align = MF->getDataLayout().getTypeAllocSize(Type);
414       }
415     }
416 
417     unsigned Idx;
418     MachineConstantPool *MCP = MF->getConstantPool();
419     if (CP->isMachineConstantPoolEntry())
420       Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Align);
421     else
422       Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Align);
423     MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
424   } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
425     MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
426   } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
427     MIB.addSym(SymNode->getMCSymbol());
428   } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
429     MIB.addBlockAddress(BA->getBlockAddress(),
430                         BA->getOffset(),
431                         BA->getTargetFlags());
432   } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
433     MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
434   } else {
435     assert(Op.getValueType() != MVT::Other &&
436            Op.getValueType() != MVT::Glue &&
437            "Chain and glue operands should occur at end of operand list!");
438     AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
439                        IsDebug, IsClone, IsCloned);
440   }
441 }
442 
443 unsigned InstrEmitter::ConstrainForSubReg(unsigned VReg, unsigned SubIdx,
444                                           MVT VT, DebugLoc DL) {
445   const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
446   const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
447 
448   // RC is a sub-class of VRC that supports SubIdx.  Try to constrain VReg
449   // within reason.
450   if (RC && RC != VRC)
451     RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
452 
453   // VReg has been adjusted.  It can be used with SubIdx operands now.
454   if (RC)
455     return VReg;
456 
457   // VReg couldn't be reasonably constrained.  Emit a COPY to a new virtual
458   // register instead.
459   RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT), SubIdx);
460   assert(RC && "No legal register class for VT supports that SubIdx");
461   unsigned NewReg = MRI->createVirtualRegister(RC);
462   BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
463     .addReg(VReg);
464   return NewReg;
465 }
466 
467 /// EmitSubregNode - Generate machine code for subreg nodes.
468 ///
469 void InstrEmitter::EmitSubregNode(SDNode *Node,
470                                   DenseMap<SDValue, unsigned> &VRBaseMap,
471                                   bool IsClone, bool IsCloned) {
472   unsigned VRBase = 0;
473   unsigned Opc = Node->getMachineOpcode();
474 
475   // If the node is only used by a CopyToReg and the dest reg is a vreg, use
476   // the CopyToReg'd destination register instead of creating a new vreg.
477   for (SDNode *User : Node->uses()) {
478     if (User->getOpcode() == ISD::CopyToReg &&
479         User->getOperand(2).getNode() == Node) {
480       unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
481       if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
482         VRBase = DestReg;
483         break;
484       }
485     }
486   }
487 
488   if (Opc == TargetOpcode::EXTRACT_SUBREG) {
489     // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub.  There are no
490     // constraints on the %dst register, COPY can target all legal register
491     // classes.
492     unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
493     const TargetRegisterClass *TRC =
494       TLI->getRegClassFor(Node->getSimpleValueType(0));
495 
496     unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
497     MachineInstr *DefMI = MRI->getVRegDef(VReg);
498     unsigned SrcReg, DstReg, DefSubIdx;
499     if (DefMI &&
500         TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
501         SubIdx == DefSubIdx &&
502         TRC == MRI->getRegClass(SrcReg)) {
503       // Optimize these:
504       // r1025 = s/zext r1024, 4
505       // r1026 = extract_subreg r1025, 4
506       // to a copy
507       // r1026 = copy r1024
508       VRBase = MRI->createVirtualRegister(TRC);
509       BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
510               TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
511       MRI->clearKillFlags(SrcReg);
512     } else {
513       // VReg may not support a SubIdx sub-register, and we may need to
514       // constrain its register class or issue a COPY to a compatible register
515       // class.
516       VReg = ConstrainForSubReg(VReg, SubIdx,
517                                 Node->getOperand(0).getSimpleValueType(),
518                                 Node->getDebugLoc());
519 
520       // Create the destreg if it is missing.
521       if (VRBase == 0)
522         VRBase = MRI->createVirtualRegister(TRC);
523 
524       // Create the extract_subreg machine instruction.
525       BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
526               TII->get(TargetOpcode::COPY), VRBase).addReg(VReg, 0, SubIdx);
527     }
528   } else if (Opc == TargetOpcode::INSERT_SUBREG ||
529              Opc == TargetOpcode::SUBREG_TO_REG) {
530     SDValue N0 = Node->getOperand(0);
531     SDValue N1 = Node->getOperand(1);
532     SDValue N2 = Node->getOperand(2);
533     unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
534 
535     // Figure out the register class to create for the destreg.  It should be
536     // the largest legal register class supporting SubIdx sub-registers.
537     // RegisterCoalescer will constrain it further if it decides to eliminate
538     // the INSERT_SUBREG instruction.
539     //
540     //   %dst = INSERT_SUBREG %src, %sub, SubIdx
541     //
542     // is lowered by TwoAddressInstructionPass to:
543     //
544     //   %dst = COPY %src
545     //   %dst:SubIdx = COPY %sub
546     //
547     // There is no constraint on the %src register class.
548     //
549     const TargetRegisterClass *SRC = TLI->getRegClassFor(Node->getSimpleValueType(0));
550     SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
551     assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");
552 
553     if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
554       VRBase = MRI->createVirtualRegister(SRC);
555 
556     // Create the insert_subreg or subreg_to_reg machine instruction.
557     MachineInstrBuilder MIB =
558       BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);
559 
560     // If creating a subreg_to_reg, then the first input operand
561     // is an implicit value immediate, otherwise it's a register
562     if (Opc == TargetOpcode::SUBREG_TO_REG) {
563       const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
564       MIB.addImm(SD->getZExtValue());
565     } else
566       AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
567                  IsClone, IsCloned);
568     // Add the subregster being inserted
569     AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
570                IsClone, IsCloned);
571     MIB.addImm(SubIdx);
572     MBB->insert(InsertPos, MIB);
573   } else
574     llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
575 
576   SDValue Op(Node, 0);
577   bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
578   (void)isNew; // Silence compiler warning.
579   assert(isNew && "Node emitted out of order - early");
580 }
581 
582 /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
583 /// COPY_TO_REGCLASS is just a normal copy, except that the destination
584 /// register is constrained to be in a particular register class.
585 ///
586 void
587 InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
588                                      DenseMap<SDValue, unsigned> &VRBaseMap) {
589   unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
590 
591   // Create the new VReg in the destination class and emit a copy.
592   unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
593   const TargetRegisterClass *DstRC =
594     TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
595   unsigned NewVReg = MRI->createVirtualRegister(DstRC);
596   BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
597     NewVReg).addReg(VReg);
598 
599   SDValue Op(Node, 0);
600   bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
601   (void)isNew; // Silence compiler warning.
602   assert(isNew && "Node emitted out of order - early");
603 }
604 
605 /// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
606 ///
607 void InstrEmitter::EmitRegSequence(SDNode *Node,
608                                   DenseMap<SDValue, unsigned> &VRBaseMap,
609                                   bool IsClone, bool IsCloned) {
610   unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
611   const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
612   unsigned NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
613   const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
614   MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
615   unsigned NumOps = Node->getNumOperands();
616   assert((NumOps & 1) == 1 &&
617          "REG_SEQUENCE must have an odd number of operands!");
618   for (unsigned i = 1; i != NumOps; ++i) {
619     SDValue Op = Node->getOperand(i);
620     if ((i & 1) == 0) {
621       RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
622       // Skip physical registers as they don't have a vreg to get and we'll
623       // insert copies for them in TwoAddressInstructionPass anyway.
624       if (!R || !TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
625         unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
626         unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
627         const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
628         const TargetRegisterClass *SRC =
629         TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
630         if (SRC && SRC != RC) {
631           MRI->setRegClass(NewVReg, SRC);
632           RC = SRC;
633         }
634       }
635     }
636     AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
637                IsClone, IsCloned);
638   }
639 
640   MBB->insert(InsertPos, MIB);
641   SDValue Op(Node, 0);
642   bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
643   (void)isNew; // Silence compiler warning.
644   assert(isNew && "Node emitted out of order - early");
645 }
646 
647 /// EmitDbgValue - Generate machine instruction for a dbg_value node.
648 ///
649 MachineInstr *
650 InstrEmitter::EmitDbgValue(SDDbgValue *SD,
651                            DenseMap<SDValue, unsigned> &VRBaseMap) {
652   uint64_t Offset = SD->getOffset();
653   MDNode *Var = SD->getVariable();
654   MDNode *Expr = SD->getExpression();
655   DebugLoc DL = SD->getDebugLoc();
656   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
657          "Expected inlined-at fields to agree");
658 
659   if (SD->getKind() == SDDbgValue::FRAMEIX) {
660     // Stack address; this needs to be lowered in target-dependent fashion.
661     // EmitTargetCodeForFrameDebugValue is responsible for allocation.
662     return BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE))
663         .addFrameIndex(SD->getFrameIx())
664         .addImm(Offset)
665         .addMetadata(Var)
666         .addMetadata(Expr);
667   }
668   // Otherwise, we're going to create an instruction here.
669   const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
670   MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
671   if (SD->getKind() == SDDbgValue::SDNODE) {
672     SDNode *Node = SD->getSDNode();
673     SDValue Op = SDValue(Node, SD->getResNo());
674     // It's possible we replaced this SDNode with other(s) and therefore
675     // didn't generate code for it.  It's better to catch these cases where
676     // they happen and transfer the debug info, but trying to guarantee that
677     // in all cases would be very fragile; this is a safeguard for any
678     // that were missed.
679     DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
680     if (I==VRBaseMap.end())
681       MIB.addReg(0U);       // undef
682     else
683       AddOperand(MIB, Op, (*MIB).getNumOperands(), &II, VRBaseMap,
684                  /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
685   } else if (SD->getKind() == SDDbgValue::CONST) {
686     const Value *V = SD->getConst();
687     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
688       if (CI->getBitWidth() > 64)
689         MIB.addCImm(CI);
690       else
691         MIB.addImm(CI->getSExtValue());
692     } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
693       MIB.addFPImm(CF);
694     } else {
695       // Could be an Undef.  In any case insert an Undef so we can see what we
696       // dropped.
697       MIB.addReg(0U);
698     }
699   } else {
700     // Insert an Undef so we can see what we dropped.
701     MIB.addReg(0U);
702   }
703 
704   // Indirect addressing is indicated by an Imm as the second parameter.
705   if (SD->isIndirect())
706     MIB.addImm(Offset);
707   else {
708     assert(Offset == 0 && "direct value cannot have an offset");
709     MIB.addReg(0U, RegState::Debug);
710   }
711 
712   MIB.addMetadata(Var);
713   MIB.addMetadata(Expr);
714 
715   return &*MIB;
716 }
717 
718 /// EmitMachineNode - Generate machine code for a target-specific node and
719 /// needed dependencies.
720 ///
721 void InstrEmitter::
722 EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
723                 DenseMap<SDValue, unsigned> &VRBaseMap) {
724   unsigned Opc = Node->getMachineOpcode();
725 
726   // Handle subreg insert/extract specially
727   if (Opc == TargetOpcode::EXTRACT_SUBREG ||
728       Opc == TargetOpcode::INSERT_SUBREG ||
729       Opc == TargetOpcode::SUBREG_TO_REG) {
730     EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
731     return;
732   }
733 
734   // Handle COPY_TO_REGCLASS specially.
735   if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
736     EmitCopyToRegClassNode(Node, VRBaseMap);
737     return;
738   }
739 
740   // Handle REG_SEQUENCE specially.
741   if (Opc == TargetOpcode::REG_SEQUENCE) {
742     EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
743     return;
744   }
745 
746   if (Opc == TargetOpcode::IMPLICIT_DEF)
747     // We want a unique VR for each IMPLICIT_DEF use.
748     return;
749 
750   const MCInstrDesc &II = TII->get(Opc);
751   unsigned NumResults = CountResults(Node);
752   unsigned NumDefs = II.getNumDefs();
753   const MCPhysReg *ScratchRegs = nullptr;
754 
755   // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
756   if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
757     // Stackmaps do not have arguments and do not preserve their calling
758     // convention. However, to simplify runtime support, they clobber the same
759     // scratch registers as AnyRegCC.
760     unsigned CC = CallingConv::AnyReg;
761     if (Opc == TargetOpcode::PATCHPOINT) {
762       CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
763       NumDefs = NumResults;
764     }
765     ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
766   }
767 
768   unsigned NumImpUses = 0;
769   unsigned NodeOperands =
770     countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
771   bool HasPhysRegOuts = NumResults > NumDefs && II.getImplicitDefs()!=nullptr;
772 #ifndef NDEBUG
773   unsigned NumMIOperands = NodeOperands + NumResults;
774   if (II.isVariadic())
775     assert(NumMIOperands >= II.getNumOperands() &&
776            "Too few operands for a variadic node!");
777   else
778     assert(NumMIOperands >= II.getNumOperands() &&
779            NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
780                             NumImpUses &&
781            "#operands for dag node doesn't match .td file!");
782 #endif
783 
784   // Create the new machine instruction.
785   MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);
786 
787   // Add result register values for things that are defined by this
788   // instruction.
789   if (NumResults)
790     CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);
791 
792   // Emit all of the actual operands of this instruction, adding them to the
793   // instruction as appropriate.
794   bool HasOptPRefs = NumDefs > NumResults;
795   assert((!HasOptPRefs || !HasPhysRegOuts) &&
796          "Unable to cope with optional defs and phys regs defs!");
797   unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
798   for (unsigned i = NumSkip; i != NodeOperands; ++i)
799     AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
800                VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);
801 
802   // Add scratch registers as implicit def and early clobber
803   if (ScratchRegs)
804     for (unsigned i = 0; ScratchRegs[i]; ++i)
805       MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
806                                  RegState::EarlyClobber);
807 
808   // Transfer all of the memory reference descriptions of this instruction.
809   MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
810                  cast<MachineSDNode>(Node)->memoperands_end());
811 
812   // Insert the instruction into position in the block. This needs to
813   // happen before any custom inserter hook is called so that the
814   // hook knows where in the block to insert the replacement code.
815   MBB->insert(InsertPos, MIB);
816 
817   // The MachineInstr may also define physregs instead of virtregs.  These
818   // physreg values can reach other instructions in different ways:
819   //
820   // 1. When there is a use of a Node value beyond the explicitly defined
821   //    virtual registers, we emit a CopyFromReg for one of the implicitly
822   //    defined physregs.  This only happens when HasPhysRegOuts is true.
823   //
824   // 2. A CopyFromReg reading a physreg may be glued to this instruction.
825   //
826   // 3. A glued instruction may implicitly use a physreg.
827   //
828   // 4. A glued instruction may use a RegisterSDNode operand.
829   //
830   // Collect all the used physreg defs, and make sure that any unused physreg
831   // defs are marked as dead.
832   SmallVector<unsigned, 8> UsedRegs;
833 
834   // Additional results must be physical register defs.
835   if (HasPhysRegOuts) {
836     for (unsigned i = NumDefs; i < NumResults; ++i) {
837       unsigned Reg = II.getImplicitDefs()[i - NumDefs];
838       if (!Node->hasAnyUseOfValue(i))
839         continue;
840       // This implicitly defined physreg has a use.
841       UsedRegs.push_back(Reg);
842       EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
843     }
844   }
845 
846   // Scan the glue chain for any used physregs.
847   if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
848     for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
849       if (F->getOpcode() == ISD::CopyFromReg) {
850         UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
851         continue;
852       } else if (F->getOpcode() == ISD::CopyToReg) {
853         // Skip CopyToReg nodes that are internal to the glue chain.
854         continue;
855       }
856       // Collect declared implicit uses.
857       const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
858       UsedRegs.append(MCID.getImplicitUses(),
859                       MCID.getImplicitUses() + MCID.getNumImplicitUses());
860       // In addition to declared implicit uses, we must also check for
861       // direct RegisterSDNode operands.
862       for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
863         if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
864           unsigned Reg = R->getReg();
865           if (TargetRegisterInfo::isPhysicalRegister(Reg))
866             UsedRegs.push_back(Reg);
867         }
868     }
869   }
870 
871   // Finally mark unused registers as dead.
872   if (!UsedRegs.empty() || II.getImplicitDefs())
873     MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);
874 
875   // Run post-isel target hook to adjust this instruction if needed.
876   if (II.hasPostISelHook())
877     TLI->AdjustInstrPostInstrSelection(MIB, Node);
878 }
879 
880 /// EmitSpecialNode - Generate machine code for a target-independent node and
881 /// needed dependencies.
882 void InstrEmitter::
883 EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
884                 DenseMap<SDValue, unsigned> &VRBaseMap) {
885   switch (Node->getOpcode()) {
886   default:
887 #ifndef NDEBUG
888     Node->dump();
889 #endif
890     llvm_unreachable("This target-independent node should have been selected!");
891   case ISD::EntryToken:
892     llvm_unreachable("EntryToken should have been excluded from the schedule!");
893   case ISD::MERGE_VALUES:
894   case ISD::TokenFactor: // fall thru
895     break;
896   case ISD::CopyToReg: {
897     unsigned SrcReg;
898     SDValue SrcVal = Node->getOperand(2);
899     if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
900       SrcReg = R->getReg();
901     else
902       SrcReg = getVR(SrcVal, VRBaseMap);
903 
904     unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
905     if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
906       break;
907 
908     BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
909             DestReg).addReg(SrcReg);
910     break;
911   }
912   case ISD::CopyFromReg: {
913     unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
914     EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
915     break;
916   }
917   case ISD::EH_LABEL: {
918     MCSymbol *S = cast<EHLabelSDNode>(Node)->getLabel();
919     BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
920             TII->get(TargetOpcode::EH_LABEL)).addSym(S);
921     break;
922   }
923 
924   case ISD::LIFETIME_START:
925   case ISD::LIFETIME_END: {
926     unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) ?
927     TargetOpcode::LIFETIME_START : TargetOpcode::LIFETIME_END;
928 
929     FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Node->getOperand(1));
930     BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
931     .addFrameIndex(FI->getIndex());
932     break;
933   }
934 
935   case ISD::INLINEASM: {
936     unsigned NumOps = Node->getNumOperands();
937     if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
938       --NumOps;  // Ignore the glue operand.
939 
940     // Create the inline asm machine instruction.
941     MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(),
942                                       TII->get(TargetOpcode::INLINEASM));
943 
944     // Add the asm string as an external symbol operand.
945     SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
946     const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
947     MIB.addExternalSymbol(AsmStr);
948 
949     // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
950     // bits.
951     int64_t ExtraInfo =
952       cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
953                           getZExtValue();
954     MIB.addImm(ExtraInfo);
955 
956     // Remember to operand index of the group flags.
957     SmallVector<unsigned, 8> GroupIdx;
958 
959     // Remember registers that are part of early-clobber defs.
960     SmallVector<unsigned, 8> ECRegs;
961 
962     // Add all of the operand registers to the instruction.
963     for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
964       unsigned Flags =
965         cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
966       const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
967 
968       GroupIdx.push_back(MIB->getNumOperands());
969       MIB.addImm(Flags);
970       ++i;  // Skip the ID value.
971 
972       switch (InlineAsm::getKind(Flags)) {
973       default: llvm_unreachable("Bad flags!");
974         case InlineAsm::Kind_RegDef:
975         for (unsigned j = 0; j != NumVals; ++j, ++i) {
976           unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
977           // FIXME: Add dead flags for physical and virtual registers defined.
978           // For now, mark physical register defs as implicit to help fast
979           // regalloc. This makes inline asm look a lot like calls.
980           MIB.addReg(Reg, RegState::Define |
981                   getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
982         }
983         break;
984       case InlineAsm::Kind_RegDefEarlyClobber:
985       case InlineAsm::Kind_Clobber:
986         for (unsigned j = 0; j != NumVals; ++j, ++i) {
987           unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
988           MIB.addReg(Reg, RegState::Define | RegState::EarlyClobber |
989                   getImplRegState(TargetRegisterInfo::isPhysicalRegister(Reg)));
990           ECRegs.push_back(Reg);
991         }
992         break;
993       case InlineAsm::Kind_RegUse:  // Use of register.
994       case InlineAsm::Kind_Imm:  // Immediate.
995       case InlineAsm::Kind_Mem:  // Addressing mode.
996         // The addressing mode has been selected, just add all of the
997         // operands to the machine instruction.
998         for (unsigned j = 0; j != NumVals; ++j, ++i)
999           AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
1000                      /*IsDebug=*/false, IsClone, IsCloned);
1001 
1002         // Manually set isTied bits.
1003         if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
1004           unsigned DefGroup = 0;
1005           if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
1006             unsigned DefIdx = GroupIdx[DefGroup] + 1;
1007             unsigned UseIdx = GroupIdx.back() + 1;
1008             for (unsigned j = 0; j != NumVals; ++j)
1009               MIB->tieOperands(DefIdx + j, UseIdx + j);
1010           }
1011         }
1012         break;
1013       }
1014     }
1015 
1016     // GCC inline assembly allows input operands to also be early-clobber
1017     // output operands (so long as the operand is written only after it's
1018     // used), but this does not match the semantics of our early-clobber flag.
1019     // If an early-clobber operand register is also an input operand register,
1020     // then remove the early-clobber flag.
1021     for (unsigned Reg : ECRegs) {
1022       if (MIB->readsRegister(Reg, TRI)) {
1023         MachineOperand *MO = MIB->findRegisterDefOperand(Reg, false, TRI);
1024         assert(MO && "No def operand for clobbered register?");
1025         MO->setIsEarlyClobber(false);
1026       }
1027     }
1028 
1029     // Get the mdnode from the asm if it exists and add it to the instruction.
1030     SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
1031     const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
1032     if (MD)
1033       MIB.addMetadata(MD);
1034 
1035     MBB->insert(InsertPos, MIB);
1036     break;
1037   }
1038   }
1039 }
1040 
1041 /// InstrEmitter - Construct an InstrEmitter and set it to start inserting
1042 /// at the given position in the given block.
1043 InstrEmitter::InstrEmitter(MachineBasicBlock *mbb,
1044                            MachineBasicBlock::iterator insertpos)
1045     : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
1046       TII(MF->getSubtarget().getInstrInfo()),
1047       TRI(MF->getSubtarget().getRegisterInfo()),
1048       TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
1049       InsertPos(insertpos) {}
1050