1 //==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the Emit routines for the SelectionDAG class, which creates
10 // MachineInstrs based on the decisions of the SelectionDAG instruction
11 // selection.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "InstrEmitter.h"
16 #include "SDNodeDbgValue.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/CodeGen/MachineConstantPool.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/StackMaps.h"
24 #include "llvm/CodeGen/TargetInstrInfo.h"
25 #include "llvm/CodeGen/TargetLowering.h"
26 #include "llvm/CodeGen/TargetSubtargetInfo.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/DebugInfo.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/MathExtras.h"
32 #include "llvm/Target/TargetMachine.h"
33 using namespace llvm;
34 
35 #define DEBUG_TYPE "instr-emitter"
36 
37 /// MinRCSize - Smallest register class we allow when constraining virtual
38 /// registers.  If satisfying all register class constraints would require
39 /// using a smaller register class, emit a COPY to a new virtual register
40 /// instead.
41 const unsigned MinRCSize = 4;
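
// A simplified sketch of the pattern MinRCSize guards (it mirrors the fallback
// in AddRegisterOperand below; names are the ones used there, DL illustrative):
//
//   if (!MRI->constrainRegClass(VReg, OpRC, MinRCSize)) {
//     // Constraining would leave fewer than MinRCSize registers in the class,
//     // so emit a COPY into a fresh vreg of the required class instead.
//     Register NewVReg = MRI->createVirtualRegister(OpRC);
//     BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewVReg)
//         .addReg(VReg);
//   }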
42 
/// CountResults - The results of target nodes have register or immediate
/// values first, then an optional chain result, and optional glue results
/// (the chain and glue do not go into the resulting MachineInstr).
46 unsigned InstrEmitter::CountResults(SDNode *Node) {
47   unsigned N = Node->getNumValues();
48   while (N && Node->getValueType(N - 1) == MVT::Glue)
49     --N;
50   if (N && Node->getValueType(N - 1) == MVT::Other)
51     --N;    // Skip over chain result.
52   return N;
53 }
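
// Illustrative example (hypothetical node, for clarity only): a target node
// whose value list is
//
//   (i32, i32, Other, Glue)
//
// has CountResults(Node) == 2; the trailing Glue result(s) and then the chain
// (Other) result are peeled off from the back.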
54 
/// countOperands - The inputs to target nodes have the actual operands first,
/// followed by an optional chain operand, then optional glue operands.
57 /// Compute the number of actual operands that will go into the resulting
58 /// MachineInstr.
59 ///
60 /// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
61 /// the chain and glue. These operands may be implicit on the machine instr.
62 static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
63                               unsigned &NumImpUses) {
64   unsigned N = Node->getNumOperands();
65   while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
66     --N;
67   if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
68     --N; // Ignore chain if it exists.
69 
70   // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
71   NumImpUses = N - NumExpUses;
72   for (unsigned I = N; I > NumExpUses; --I) {
73     if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
74       continue;
75     if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
76       if (Register::isPhysicalRegister(RN->getReg()))
77         continue;
78     NumImpUses = N - I;
79     break;
80   }
81 
82   return N;
83 }
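
// Worked example (hypothetical call-like machine node, for illustration): for
// an operand list
//
//   (callee, Register:$physreg, RegisterMask, chain, glue)
//
// with NumExpUses == 1, the trailing glue and chain are stripped (N == 3), and
// the physreg and regmask operands are counted as NumImpUses == 2; they become
// implicit operands on the MachineInstr rather than explicit ones.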
84 
85 /// Return starting index of GC operand list.
86 // FIXME: need a better place for this. Put it in StackMaps?
87 static unsigned getStatepointGCArgStartIdx(MachineInstr *MI) {
88   assert(MI->getOpcode() == TargetOpcode::STATEPOINT &&
89          "STATEPOINT node expected");
90   unsigned OperIdx = StatepointOpers(MI).getNumDeoptArgsIdx();
91   unsigned NumDeopts = MI->getOperand(OperIdx).getImm();
  // At this point stack references have not been lowered yet, so they
  // each take a single operand.
94   ++OperIdx;
95   while (NumDeopts--) {
96     MachineOperand &MO = MI->getOperand(OperIdx);
97     if (MO.isImm() && MO.getImm() == StackMaps::ConstantOp) {
98       ++OperIdx;
99       assert(MI->getOperand(OperIdx).isImm() &&
100              "Unexpected statepoint operand");
101     }
102     ++OperIdx;
103   }
104   return OperIdx;
105 }
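
// Sketch of the operand walk above (hypothetical operand layout, for
// illustration only):
//
//   STATEPOINT ..., <NumDeoptArgs=3>, %v0, ConstantOp, 42, %v1, <gc args...>
//
// Each deopt argument occupies one operand, except immediates, which are a
// (StackMaps::ConstantOp, value) pair and occupy two; %v0 counts one operand,
// the pair counts two, %v1 counts one, and the returned index points at the
// first GC operand.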
106 
/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
/// implicit physical register output.
109 void InstrEmitter::
110 EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
111                 Register SrcReg, DenseMap<SDValue, Register> &VRBaseMap) {
112   Register VRBase;
113   if (SrcReg.isVirtual()) {
114     // Just use the input register directly!
115     SDValue Op(Node, ResNo);
116     if (IsClone)
117       VRBaseMap.erase(Op);
118     bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
119     (void)isNew; // Silence compiler warning.
120     assert(isNew && "Node emitted out of order - early");
121     return;
122   }
123 
124   // If the node is only used by a CopyToReg and the dest reg is a vreg, use
125   // the CopyToReg'd destination register instead of creating a new vreg.
126   bool MatchReg = true;
127   const TargetRegisterClass *UseRC = nullptr;
128   MVT VT = Node->getSimpleValueType(ResNo);
129 
130   // Stick to the preferred register classes for legal types.
131   if (TLI->isTypeLegal(VT))
132     UseRC = TLI->getRegClassFor(VT, Node->isDivergent());
133 
134   if (!IsClone && !IsCloned)
135     for (SDNode *User : Node->uses()) {
136       bool Match = true;
137       if (User->getOpcode() == ISD::CopyToReg &&
138           User->getOperand(2).getNode() == Node &&
139           User->getOperand(2).getResNo() == ResNo) {
140         Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
141         if (DestReg.isVirtual()) {
142           VRBase = DestReg;
143           Match = false;
144         } else if (DestReg != SrcReg)
145           Match = false;
146       } else {
147         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
148           SDValue Op = User->getOperand(i);
149           if (Op.getNode() != Node || Op.getResNo() != ResNo)
150             continue;
151           MVT VT = Node->getSimpleValueType(Op.getResNo());
152           if (VT == MVT::Other || VT == MVT::Glue)
153             continue;
154           Match = false;
155           if (User->isMachineOpcode()) {
156             const MCInstrDesc &II = TII->get(User->getMachineOpcode());
157             const TargetRegisterClass *RC = nullptr;
158             if (i+II.getNumDefs() < II.getNumOperands()) {
159               RC = TRI->getAllocatableClass(
160                 TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
161             }
162             if (!UseRC)
163               UseRC = RC;
164             else if (RC) {
165               const TargetRegisterClass *ComRC =
166                 TRI->getCommonSubClass(UseRC, RC);
167               // If multiple uses expect disjoint register classes, we emit
168               // copies in AddRegisterOperand.
169               if (ComRC)
170                 UseRC = ComRC;
171             }
172           }
173         }
174       }
175       MatchReg &= Match;
176       if (VRBase)
177         break;
178     }
179 
180   const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
181   SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
182 
183   // Figure out the register class to create for the destreg.
184   if (VRBase) {
185     DstRC = MRI->getRegClass(VRBase);
186   } else if (UseRC) {
187     assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
188            "Incompatible phys register def and uses!");
189     DstRC = UseRC;
190   } else {
191     DstRC = TLI->getRegClassFor(VT, Node->isDivergent());
192   }
193 
194   // If all uses are reading from the src physical register and copying the
195   // register is either impossible or very expensive, then don't create a copy.
196   if (MatchReg && SrcRC->getCopyCost() < 0) {
197     VRBase = SrcReg;
198   } else {
199     // Create the reg, emit the copy.
200     VRBase = MRI->createVirtualRegister(DstRC);
201     BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
202             VRBase).addReg(SrcReg);
203   }
204 
205   SDValue Op(Node, ResNo);
206   if (IsClone)
207     VRBaseMap.erase(Op);
208   bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
209   (void)isNew; // Silence compiler warning.
210   assert(isNew && "Node emitted out of order - early");
211 }
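
// When a copy is emitted, the result in MIR terms is simply (register names
// and classes illustrative only):
//
//   %42:gr32 = COPY $eax
//
// and (Node, ResNo) is mapped to %42 in VRBaseMap so later uses read the vreg
// rather than the physreg.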
212 
213 void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
214                                        MachineInstrBuilder &MIB,
215                                        const MCInstrDesc &II,
216                                        bool IsClone, bool IsCloned,
217                                        DenseMap<SDValue, Register> &VRBaseMap) {
218   assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
219          "IMPLICIT_DEF should have been handled as a special case elsewhere!");
220 
221   unsigned NumResults = CountResults(Node);
222   bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
223                              II.isVariadic() && II.variadicOpsAreDefs();
224   unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
225   if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
226     NumVRegs = NumResults;
227   for (unsigned i = 0; i < NumVRegs; ++i) {
228     // If the specific node value is only used by a CopyToReg and the dest reg
229     // is a vreg in the same register class, use the CopyToReg'd destination
230     // register instead of creating a new vreg.
231     Register VRBase;
232     const TargetRegisterClass *RC =
233       TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
234     // Always let the value type influence the used register class. The
235     // constraints on the instruction may be too lax to represent the value
236     // type correctly. For example, a 64-bit float (X86::FR64) can't live in
237     // the 32-bit float super-class (X86::FR32).
238     if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
239       const TargetRegisterClass *VTRC = TLI->getRegClassFor(
240           Node->getSimpleValueType(i),
241           (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
242       if (RC)
243         VTRC = TRI->getCommonSubClass(RC, VTRC);
244       if (VTRC)
245         RC = VTRC;
246     }
247 
248     if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
249       // Optional def must be a physical register.
250       VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
251       assert(VRBase.isPhysical());
252       MIB.addReg(VRBase, RegState::Define);
253     }
254 
255     if (!VRBase && !IsClone && !IsCloned)
256       for (SDNode *User : Node->uses()) {
257         if (User->getOpcode() == ISD::CopyToReg &&
258             User->getOperand(2).getNode() == Node &&
259             User->getOperand(2).getResNo() == i) {
260           unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
261           if (Register::isVirtualRegister(Reg)) {
262             const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
263             if (RegRC == RC) {
264               VRBase = Reg;
265               MIB.addReg(VRBase, RegState::Define);
266               break;
267             }
268           }
269         }
270       }
271 
272     // Create the result registers for this node and add the result regs to
273     // the machine instruction.
274     if (VRBase == 0) {
275       assert(RC && "Isn't a register operand!");
276       VRBase = MRI->createVirtualRegister(RC);
277       MIB.addReg(VRBase, RegState::Define);
278     }
279 
280     // If this def corresponds to a result of the SDNode insert the VRBase into
281     // the lookup map.
282     if (i < NumResults) {
283       SDValue Op(Node, i);
284       if (IsClone)
285         VRBaseMap.erase(Op);
286       bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
287       (void)isNew; // Silence compiler warning.
288       assert(isNew && "Node emitted out of order - early");
289     }
290   }
291 }
292 
293 /// getVR - Return the virtual register corresponding to the specified result
294 /// of the specified node.
295 Register InstrEmitter::getVR(SDValue Op,
296                              DenseMap<SDValue, Register> &VRBaseMap) {
297   if (Op.isMachineOpcode() &&
298       Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
299     // Add an IMPLICIT_DEF instruction before every use.
300     // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
301     // does not include operand register class info.
302     const TargetRegisterClass *RC = TLI->getRegClassFor(
303         Op.getSimpleValueType(), Op.getNode()->isDivergent());
304     Register VReg = MRI->createVirtualRegister(RC);
305     BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
306             TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
307     return VReg;
308   }
309 
310   DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
311   assert(I != VRBaseMap.end() && "Node emitted out of order - late");
312   return I->second;
313 }
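
// For the IMPLICIT_DEF special case above, each use gets its own fresh def
// (illustrative vreg number):
//
//   %7 = IMPLICIT_DEF
//
// and the new vreg is returned without being recorded in VRBaseMap, so no two
// uses ever share an IMPLICIT_DEF.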
314 
315 
316 /// AddRegisterOperand - Add the specified register as an operand to the
317 /// specified machine instr. Insert register copies if the register is
318 /// not in the required register class.
319 void
320 InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
321                                  SDValue Op,
322                                  unsigned IIOpNum,
323                                  const MCInstrDesc *II,
324                                  DenseMap<SDValue, Register> &VRBaseMap,
325                                  bool IsDebug, bool IsClone, bool IsCloned) {
326   assert(Op.getValueType() != MVT::Other &&
327          Op.getValueType() != MVT::Glue &&
328          "Chain and glue operands should occur at end of operand list!");
329   // Get/emit the operand.
330   Register VReg = getVR(Op, VRBaseMap);
331 
332   const MCInstrDesc &MCID = MIB->getDesc();
333   bool isOptDef = IIOpNum < MCID.getNumOperands() &&
334     MCID.OpInfo[IIOpNum].isOptionalDef();
335 
336   // If the instruction requires a register in a different class, create
337   // a new virtual register and copy the value into it, but first attempt to
338   // shrink VReg's register class within reason.  For example, if VReg == GR32
339   // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
340   if (II) {
341     const TargetRegisterClass *OpRC = nullptr;
342     if (IIOpNum < II->getNumOperands())
343       OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);
344 
345     if (OpRC) {
346       const TargetRegisterClass *ConstrainedRC
347         = MRI->constrainRegClass(VReg, OpRC, MinRCSize);
348       if (!ConstrainedRC) {
349         OpRC = TRI->getAllocatableClass(OpRC);
350         assert(OpRC && "Constraints cannot be fulfilled for allocation");
351         Register NewVReg = MRI->createVirtualRegister(OpRC);
352         BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
353                 TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
354         VReg = NewVReg;
355       } else {
356         assert(ConstrainedRC->isAllocatable() &&
357            "Constraining an allocatable VReg produced an unallocatable class?");
358       }
359     }
360   }
361 
  // If this value has only one use, that use is a kill. This is a
  // conservative approximation. InstrEmitter does trivial coalescing
  // with CopyFromReg nodes, so don't emit kill flags for them.
  // Avoid kill flags on scheduler-cloned nodes, since there will be
  // multiple uses.
  // Tied operands are never killed, so we need to check for that, which
  // means we first need to determine the index of the operand.
369   bool isKill = Op.hasOneUse() &&
370                 Op.getNode()->getOpcode() != ISD::CopyFromReg &&
371                 !IsDebug &&
372                 !(IsClone || IsCloned);
373   if (isKill) {
374     unsigned Idx = MIB->getNumOperands();
375     while (Idx > 0 &&
376            MIB->getOperand(Idx-1).isReg() &&
377            MIB->getOperand(Idx-1).isImplicit())
378       --Idx;
379     bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
380     if (isTied)
381       isKill = false;
382   }
383 
384   MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
385              getDebugRegState(IsDebug));
386 }
387 
388 /// AddOperand - Add the specified operand to the specified machine instr.  II
389 /// specifies the instruction information for the node, and IIOpNum is the
390 /// operand number (in the II) that we are adding.
391 void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
392                               SDValue Op,
393                               unsigned IIOpNum,
394                               const MCInstrDesc *II,
395                               DenseMap<SDValue, Register> &VRBaseMap,
396                               bool IsDebug, bool IsClone, bool IsCloned) {
397   if (Op.isMachineOpcode()) {
398     AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
399                        IsDebug, IsClone, IsCloned);
400   } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
401     MIB.addImm(C->getSExtValue());
402   } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
403     MIB.addFPImm(F->getConstantFPValue());
404   } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
405     Register VReg = R->getReg();
406     MVT OpVT = Op.getSimpleValueType();
407     const TargetRegisterClass *IIRC =
408         II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
409            : nullptr;
410     const TargetRegisterClass *OpRC =
411         TLI->isTypeLegal(OpVT)
412             ? TLI->getRegClassFor(OpVT,
413                                   Op.getNode()->isDivergent() ||
414                                       (IIRC && TRI->isDivergentRegClass(IIRC)))
415             : nullptr;
416 
417     if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
418       Register NewVReg = MRI->createVirtualRegister(IIRC);
419       BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
420                TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
421       VReg = NewVReg;
422     }
423     // Turn additional physreg operands into implicit uses on non-variadic
424     // instructions. This is used by call and return instructions passing
425     // arguments in registers.
426     bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
427     MIB.addReg(VReg, getImplRegState(Imp));
428   } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
429     MIB.addRegMask(RM->getRegMask());
430   } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
431     MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
432                          TGA->getTargetFlags());
433   } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
434     MIB.addMBB(BBNode->getBasicBlock());
435   } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
436     MIB.addFrameIndex(FI->getIndex());
437   } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
438     MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
439   } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
440     int Offset = CP->getOffset();
441     Align Alignment = CP->getAlign();
442 
443     unsigned Idx;
444     MachineConstantPool *MCP = MF->getConstantPool();
445     if (CP->isMachineConstantPoolEntry())
446       Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
447     else
448       Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
449     MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
450   } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
451     MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
452   } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
453     MIB.addSym(SymNode->getMCSymbol());
454   } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
455     MIB.addBlockAddress(BA->getBlockAddress(),
456                         BA->getOffset(),
457                         BA->getTargetFlags());
458   } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
459     MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
460   } else {
461     assert(Op.getValueType() != MVT::Other &&
462            Op.getValueType() != MVT::Glue &&
463            "Chain and glue operands should occur at end of operand list!");
464     AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
465                        IsDebug, IsClone, IsCloned);
466   }
467 }
468 
469 Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
470                                           MVT VT, bool isDivergent, const DebugLoc &DL) {
471   const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
472   const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
473 
474   // RC is a sub-class of VRC that supports SubIdx.  Try to constrain VReg
475   // within reason.
476   if (RC && RC != VRC)
477     RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
478 
479   // VReg has been adjusted.  It can be used with SubIdx operands now.
480   if (RC)
481     return VReg;
482 
483   // VReg couldn't be reasonably constrained.  Emit a COPY to a new virtual
484   // register instead.
485   RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
486   assert(RC && "No legal register class for VT supports that SubIdx");
487   Register NewReg = MRI->createVirtualRegister(RC);
488   BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
489     .addReg(VReg);
490   return NewReg;
491 }
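
// Usage sketch (this mirrors the EXTRACT_SUBREG path below): when VReg's class
// cannot reasonably be constrained to one supporting SubIdx, the helper emits
//
//   %new = COPY %vreg
//
// with %new in a class that does support SubIdx, and the caller then reads
// %new:SubIdx instead of %vreg:SubIdx.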
492 
493 /// EmitSubregNode - Generate machine code for subreg nodes.
494 ///
495 void InstrEmitter::EmitSubregNode(SDNode *Node,
496                                   DenseMap<SDValue, Register> &VRBaseMap,
497                                   bool IsClone, bool IsCloned) {
498   Register VRBase;
499   unsigned Opc = Node->getMachineOpcode();
500 
501   // If the node is only used by a CopyToReg and the dest reg is a vreg, use
502   // the CopyToReg'd destination register instead of creating a new vreg.
503   for (SDNode *User : Node->uses()) {
504     if (User->getOpcode() == ISD::CopyToReg &&
505         User->getOperand(2).getNode() == Node) {
506       Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
507       if (DestReg.isVirtual()) {
508         VRBase = DestReg;
509         break;
510       }
511     }
512   }
513 
514   if (Opc == TargetOpcode::EXTRACT_SUBREG) {
    // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub.  There are no
    // constraints on the %dst register; COPY can target all legal register
    // classes.
518     unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
519     const TargetRegisterClass *TRC =
520       TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
521 
522     Register Reg;
523     MachineInstr *DefMI;
524     RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
525     if (R && Register::isPhysicalRegister(R->getReg())) {
526       Reg = R->getReg();
527       DefMI = nullptr;
528     } else {
529       Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
530       DefMI = MRI->getVRegDef(Reg);
531     }
532 
533     Register SrcReg, DstReg;
534     unsigned DefSubIdx;
535     if (DefMI &&
536         TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
537         SubIdx == DefSubIdx &&
538         TRC == MRI->getRegClass(SrcReg)) {
539       // Optimize these:
540       // r1025 = s/zext r1024, 4
541       // r1026 = extract_subreg r1025, 4
542       // to a copy
543       // r1026 = copy r1024
544       VRBase = MRI->createVirtualRegister(TRC);
545       BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
546               TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
547       MRI->clearKillFlags(SrcReg);
548     } else {
549       // Reg may not support a SubIdx sub-register, and we may need to
550       // constrain its register class or issue a COPY to a compatible register
551       // class.
552       if (Reg.isVirtual())
553         Reg = ConstrainForSubReg(Reg, SubIdx,
554                                  Node->getOperand(0).getSimpleValueType(),
555                                  Node->isDivergent(), Node->getDebugLoc());
556       // Create the destreg if it is missing.
557       if (!VRBase)
558         VRBase = MRI->createVirtualRegister(TRC);
559 
560       // Create the extract_subreg machine instruction.
561       MachineInstrBuilder CopyMI =
562           BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
563                   TII->get(TargetOpcode::COPY), VRBase);
564       if (Reg.isVirtual())
565         CopyMI.addReg(Reg, 0, SubIdx);
566       else
567         CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
568     }
569   } else if (Opc == TargetOpcode::INSERT_SUBREG ||
570              Opc == TargetOpcode::SUBREG_TO_REG) {
571     SDValue N0 = Node->getOperand(0);
572     SDValue N1 = Node->getOperand(1);
573     SDValue N2 = Node->getOperand(2);
574     unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
575 
576     // Figure out the register class to create for the destreg.  It should be
577     // the largest legal register class supporting SubIdx sub-registers.
578     // RegisterCoalescer will constrain it further if it decides to eliminate
579     // the INSERT_SUBREG instruction.
580     //
581     //   %dst = INSERT_SUBREG %src, %sub, SubIdx
582     //
583     // is lowered by TwoAddressInstructionPass to:
584     //
585     //   %dst = COPY %src
586     //   %dst:SubIdx = COPY %sub
587     //
588     // There is no constraint on the %src register class.
589     //
590     const TargetRegisterClass *SRC =
591         TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
592     SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
593     assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");
594 
595     if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
596       VRBase = MRI->createVirtualRegister(SRC);
597 
598     // Create the insert_subreg or subreg_to_reg machine instruction.
599     MachineInstrBuilder MIB =
600       BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);
601 
    // If creating a subreg_to_reg, then the first input operand
    // is an immediate holding the implicit value; otherwise it's a register.
604     if (Opc == TargetOpcode::SUBREG_TO_REG) {
605       const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
606       MIB.addImm(SD->getZExtValue());
607     } else
608       AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
609                  IsClone, IsCloned);
610     // Add the subregister being inserted
611     AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
612                IsClone, IsCloned);
613     MIB.addImm(SubIdx);
614     MBB->insert(InsertPos, MIB);
615   } else
616     llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
617 
618   SDValue Op(Node, 0);
619   bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
620   (void)isNew; // Silence compiler warning.
621   assert(isNew && "Node emitted out of order - early");
622 }
623 
624 /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
625 /// COPY_TO_REGCLASS is just a normal copy, except that the destination
626 /// register is constrained to be in a particular register class.
627 ///
628 void
629 InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
630                                      DenseMap<SDValue, Register> &VRBaseMap) {
631   unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
632 
633   // Create the new VReg in the destination class and emit a copy.
634   unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
635   const TargetRegisterClass *DstRC =
636     TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
637   Register NewVReg = MRI->createVirtualRegister(DstRC);
638   BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
639     NewVReg).addReg(VReg);
640 
641   SDValue Op(Node, 0);
642   bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
643   (void)isNew; // Silence compiler warning.
644   assert(isNew && "Node emitted out of order - early");
645 }
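
// In MIR terms the node becomes just (illustrative vregs and class):
//
//   %5:dst_rc = COPY %3
//
// i.e. COPY_TO_REGCLASS never survives as a distinct instruction; only the
// destination register class constraint does.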
646 
647 /// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
648 ///
649 void InstrEmitter::EmitRegSequence(SDNode *Node,
650                                   DenseMap<SDValue, Register> &VRBaseMap,
651                                   bool IsClone, bool IsCloned) {
652   unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
653   const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
654   Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
655   const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
656   MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
657   unsigned NumOps = Node->getNumOperands();
658   // If the input pattern has a chain, then the root of the corresponding
659   // output pattern will get a chain as well. This can happen to be a
660   // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
661   if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
662     --NumOps; // Ignore chain if it exists.
663 
664   assert((NumOps & 1) == 1 &&
665          "REG_SEQUENCE must have an odd number of operands!");
666   for (unsigned i = 1; i != NumOps; ++i) {
667     SDValue Op = Node->getOperand(i);
668     if ((i & 1) == 0) {
669       RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
670       // Skip physical registers as they don't have a vreg to get and we'll
671       // insert copies for them in TwoAddressInstructionPass anyway.
672       if (!R || !Register::isPhysicalRegister(R->getReg())) {
673         unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
674         unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
675         const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
        const TargetRegisterClass *SRC =
            TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
678         if (SRC && SRC != RC) {
679           MRI->setRegClass(NewVReg, SRC);
680           RC = SRC;
681         }
682       }
683     }
684     AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
685                IsClone, IsCloned);
686   }
687 
688   MBB->insert(InsertPos, MIB);
689   SDValue Op(Node, 0);
690   bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
691   (void)isNew; // Silence compiler warning.
692   assert(isNew && "Node emitted out of order - early");
693 }
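
// Shape of the emitted instruction (illustrative operands): the node carries a
// destination class index followed by (value, subreg-index) pairs,
//
//   (DstRCIdx, %v0, sub0, %v1, sub1)
//
// and is emitted as
//
//   %dst = REG_SEQUENCE %v0, sub0, %v1, sub1
//
// with the class index folded into %dst's register class rather than becoming
// an operand.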
694 
695 /// EmitDbgValue - Generate machine instruction for a dbg_value node.
696 ///
697 MachineInstr *
698 InstrEmitter::EmitDbgValue(SDDbgValue *SD,
699                            DenseMap<SDValue, Register> &VRBaseMap) {
700   MDNode *Var = SD->getVariable();
701   MDNode *Expr = SD->getExpression();
702   DebugLoc DL = SD->getDebugLoc();
703   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
704          "Expected inlined-at fields to agree");
705 
706   SD->setIsEmitted();
707 
708   if (SD->isInvalidated()) {
    // An invalidated SDNode must generate an undef DBG_VALUE: although the
    // original value is no longer computed, earlier DBG_VALUEs' live ranges
    // must not leak into later code.
712     auto MIB = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE));
713     MIB.addReg(0U);
714     MIB.addReg(0U, RegState::Debug);
715     MIB.addMetadata(Var);
716     MIB.addMetadata(Expr);
717     return &*MIB;
718   }
719 
720   if (SD->getKind() == SDDbgValue::FRAMEIX) {
721     // Stack address; this needs to be lowered in target-dependent fashion.
722     // EmitTargetCodeForFrameDebugValue is responsible for allocation.
723     auto FrameMI = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE))
724                        .addFrameIndex(SD->getFrameIx());
725     if (SD->isIndirect())
726       // Push [fi + 0] onto the DIExpression stack.
727       FrameMI.addImm(0);
728     else
729       // Push fi onto the DIExpression stack.
730       FrameMI.addReg(0);
731     return FrameMI.addMetadata(Var).addMetadata(Expr);
732   }
733   // Otherwise, we're going to create an instruction here.
734   const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
735   MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
736   if (SD->getKind() == SDDbgValue::SDNODE) {
737     SDNode *Node = SD->getSDNode();
738     SDValue Op = SDValue(Node, SD->getResNo());
739     // It's possible we replaced this SDNode with other(s) and therefore
740     // didn't generate code for it.  It's better to catch these cases where
741     // they happen and transfer the debug info, but trying to guarantee that
742     // in all cases would be very fragile; this is a safeguard for any
743     // that were missed.
744     DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
745     if (I==VRBaseMap.end())
746       MIB.addReg(0U);       // undef
747     else
748       AddOperand(MIB, Op, (*MIB).getNumOperands(), &II, VRBaseMap,
749                  /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
750   } else if (SD->getKind() == SDDbgValue::VREG) {
751     MIB.addReg(SD->getVReg(), RegState::Debug);
752   } else if (SD->getKind() == SDDbgValue::CONST) {
753     const Value *V = SD->getConst();
754     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
755       if (CI->getBitWidth() > 64)
756         MIB.addCImm(CI);
757       else
758         MIB.addImm(CI->getSExtValue());
759     } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
760       MIB.addFPImm(CF);
761     } else if (isa<ConstantPointerNull>(V)) {
762       // Note: This assumes that all nullptr constants are zero-valued.
763       MIB.addImm(0);
764     } else {
765       // Could be an Undef.  In any case insert an Undef so we can see what we
766       // dropped.
767       MIB.addReg(0U);
768     }
769   } else {
770     // Insert an Undef so we can see what we dropped.
771     MIB.addReg(0U);
772   }
773 
774   // Indirect addressing is indicated by an Imm as the second parameter.
775   if (SD->isIndirect())
776     MIB.addImm(0U);
777   else
778     MIB.addReg(0U, RegState::Debug);
779 
780   MIB.addMetadata(Var);
781   MIB.addMetadata(Expr);
782 
783   return &*MIB;
784 }
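
// Resulting operand layout, matching the code above (operands illustrative):
//
//   DBG_VALUE %vreg, $noreg, !var, !expr     (direct location)
//   DBG_VALUE <fi#0>, 0, !var, !expr         (indirect stack location)
//
// i.e. the second operand is an immediate 0 for indirect locations and a
// register 0 ($noreg) otherwise.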
785 
786 MachineInstr *
787 InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
788   MDNode *Label = SD->getLabel();
789   DebugLoc DL = SD->getDebugLoc();
790   assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
791          "Expected inlined-at fields to agree");
792 
793   const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL);
794   MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
795   MIB.addMetadata(Label);
796 
797   return &*MIB;
798 }
799 
800 /// EmitMachineNode - Generate machine code for a target-specific node and
801 /// needed dependencies.
802 ///
803 void InstrEmitter::
804 EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
805                 DenseMap<SDValue, Register> &VRBaseMap) {
806   unsigned Opc = Node->getMachineOpcode();
807 
808   // Handle subreg insert/extract specially
809   if (Opc == TargetOpcode::EXTRACT_SUBREG ||
810       Opc == TargetOpcode::INSERT_SUBREG ||
811       Opc == TargetOpcode::SUBREG_TO_REG) {
812     EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
813     return;
814   }
815 
816   // Handle COPY_TO_REGCLASS specially.
817   if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
818     EmitCopyToRegClassNode(Node, VRBaseMap);
819     return;
820   }
821 
822   // Handle REG_SEQUENCE specially.
823   if (Opc == TargetOpcode::REG_SEQUENCE) {
824     EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
825     return;
826   }
827 
828   if (Opc == TargetOpcode::IMPLICIT_DEF)
829     // We want a unique VR for each IMPLICIT_DEF use.
830     return;
831 
832   const MCInstrDesc &II = TII->get(Opc);
833   unsigned NumResults = CountResults(Node);
834   unsigned NumDefs = II.getNumDefs();
835   const MCPhysReg *ScratchRegs = nullptr;
836 
837   // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
838   if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
839     // Stackmaps do not have arguments and do not preserve their calling
840     // convention. However, to simplify runtime support, they clobber the same
841     // scratch registers as AnyRegCC.
842     unsigned CC = CallingConv::AnyReg;
843     if (Opc == TargetOpcode::PATCHPOINT) {
844       CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
845       NumDefs = NumResults;
846     }
847     ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
848   } else if (Opc == TargetOpcode::STATEPOINT) {
849     NumDefs = NumResults;
850   }
851 
852   unsigned NumImpUses = 0;
853   unsigned NodeOperands =
854     countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
855   bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
856                              II.isVariadic() && II.variadicOpsAreDefs();
857   bool HasPhysRegOuts = NumResults > NumDefs &&
858                         II.getImplicitDefs() != nullptr && !HasVRegVariadicDefs;
859 #ifndef NDEBUG
860   unsigned NumMIOperands = NodeOperands + NumResults;
861   if (II.isVariadic())
862     assert(NumMIOperands >= II.getNumOperands() &&
863            "Too few operands for a variadic node!");
864   else
865     assert(NumMIOperands >= II.getNumOperands() &&
866            NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
867                             NumImpUses &&
868            "#operands for dag node doesn't match .td file!");
869 #endif
870 
871   // Create the new machine instruction.
872   MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);
873 
874   // Add result register values for things that are defined by this
875   // instruction.
876   if (NumResults) {
877     CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);
878 
879     // Transfer any IR flags from the SDNode to the MachineInstr
880     MachineInstr *MI = MIB.getInstr();
881     const SDNodeFlags Flags = Node->getFlags();
882     if (Flags.hasNoSignedZeros())
883       MI->setFlag(MachineInstr::MIFlag::FmNsz);
884 
885     if (Flags.hasAllowReciprocal())
886       MI->setFlag(MachineInstr::MIFlag::FmArcp);
887 
888     if (Flags.hasNoNaNs())
889       MI->setFlag(MachineInstr::MIFlag::FmNoNans);
890 
891     if (Flags.hasNoInfs())
892       MI->setFlag(MachineInstr::MIFlag::FmNoInfs);
893 
894     if (Flags.hasAllowContract())
895       MI->setFlag(MachineInstr::MIFlag::FmContract);
896 
897     if (Flags.hasApproximateFuncs())
898       MI->setFlag(MachineInstr::MIFlag::FmAfn);
899 
900     if (Flags.hasAllowReassociation())
901       MI->setFlag(MachineInstr::MIFlag::FmReassoc);
902 
903     if (Flags.hasNoUnsignedWrap())
904       MI->setFlag(MachineInstr::MIFlag::NoUWrap);
905 
906     if (Flags.hasNoSignedWrap())
907       MI->setFlag(MachineInstr::MIFlag::NoSWrap);
908 
909     if (Flags.hasExact())
910       MI->setFlag(MachineInstr::MIFlag::IsExact);
911 
912     if (Flags.hasNoFPExcept())
913       MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
914   }
915 
916   // Emit all of the actual operands of this instruction, adding them to the
917   // instruction as appropriate.
918   bool HasOptPRefs = NumDefs > NumResults;
919   assert((!HasOptPRefs || !HasPhysRegOuts) &&
920          "Unable to cope with optional defs and phys regs defs!");
921   unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
922   for (unsigned i = NumSkip; i != NodeOperands; ++i)
923     AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
924                VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);
925 
926   // Add scratch registers as implicit def and early clobber
927   if (ScratchRegs)
928     for (unsigned i = 0; ScratchRegs[i]; ++i)
929       MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
930                                  RegState::EarlyClobber);
931 
932   // Set the memory reference descriptions of this instruction now that it is
933   // part of the function.
934   MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());
935 
936   // Insert the instruction into position in the block. This needs to
937   // happen before any custom inserter hook is called so that the
938   // hook knows where in the block to insert the replacement code.
939   MBB->insert(InsertPos, MIB);
940 
941   // The MachineInstr may also define physregs instead of virtregs.  These
942   // physreg values can reach other instructions in different ways:
943   //
944   // 1. When there is a use of a Node value beyond the explicitly defined
945   //    virtual registers, we emit a CopyFromReg for one of the implicitly
946   //    defined physregs.  This only happens when HasPhysRegOuts is true.
947   //
948   // 2. A CopyFromReg reading a physreg may be glued to this instruction.
949   //
950   // 3. A glued instruction may implicitly use a physreg.
951   //
952   // 4. A glued instruction may use a RegisterSDNode operand.
953   //
954   // Collect all the used physreg defs, and make sure that any unused physreg
955   // defs are marked as dead.
956   SmallVector<Register, 8> UsedRegs;
957 
958   // Additional results must be physical register defs.
959   if (HasPhysRegOuts) {
960     for (unsigned i = NumDefs; i < NumResults; ++i) {
961       Register Reg = II.getImplicitDefs()[i - NumDefs];
962       if (!Node->hasAnyUseOfValue(i))
963         continue;
964       // This implicitly defined physreg has a use.
965       UsedRegs.push_back(Reg);
966       EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
967     }
968   }
969 
970   // Scan the glue chain for any used physregs.
971   if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
972     for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
973       if (F->getOpcode() == ISD::CopyFromReg) {
974         UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
975         continue;
976       } else if (F->getOpcode() == ISD::CopyToReg) {
977         // Skip CopyToReg nodes that are internal to the glue chain.
978         continue;
979       }
980       // Collect declared implicit uses.
981       const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
982       UsedRegs.append(MCID.getImplicitUses(),
983                       MCID.getImplicitUses() + MCID.getNumImplicitUses());
984       // In addition to declared implicit uses, we must also check for
985       // direct RegisterSDNode operands.
986       for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
987         if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
988           Register Reg = R->getReg();
989           if (Reg.isPhysical())
990             UsedRegs.push_back(Reg);
991         }
992     }
993   }
994 
995   // Finally mark unused registers as dead.
996   if (!UsedRegs.empty() || II.getImplicitDefs() || II.hasOptionalDef())
997     MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);
998 
  // STATEPOINT is too 'dynamic' to have a meaningful machine description.
  // We have to tie its operands manually.
1001   if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
1002     assert(!HasPhysRegOuts && "STATEPOINT mishandled");
1003     MachineInstr *MI = MIB;
1004     unsigned Def = 0;
1005     unsigned Use = getStatepointGCArgStartIdx(MI) + 1;
1006     while (Def < NumDefs) {
1007       if (MI->getOperand(Use).isReg())
1008         MI->tieOperands(Def++, Use);
1009       Use += 2;
1010     }
1011   }
1012 
1013   // Run post-isel target hook to adjust this instruction if needed.
1014   if (II.hasPostISelHook())
1015     TLI->AdjustInstrPostInstrSelection(*MIB, Node);
1016 }
1017 
1018 /// EmitSpecialNode - Generate machine code for a target-independent node and
1019 /// needed dependencies.
1020 void InstrEmitter::
1021 EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
1022                 DenseMap<SDValue, Register> &VRBaseMap) {
1023   switch (Node->getOpcode()) {
1024   default:
1025 #ifndef NDEBUG
1026     Node->dump();
1027 #endif
1028     llvm_unreachable("This target-independent node should have been selected!");
1029   case ISD::EntryToken:
1030     llvm_unreachable("EntryToken should have been excluded from the schedule!");
1031   case ISD::MERGE_VALUES:
1032   case ISD::TokenFactor: // fall thru
1033     break;
1034   case ISD::CopyToReg: {
1035     Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1036     SDValue SrcVal = Node->getOperand(2);
1037     if (Register::isVirtualRegister(DestReg) && SrcVal.isMachineOpcode() &&
1038         SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
      // Instead of building a COPY to that vreg destination, build an
      // IMPLICIT_DEF instruction.
1041       BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1042               TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
1043       break;
1044     }
1045     Register SrcReg;
1046     if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
1047       SrcReg = R->getReg();
1048     else
1049       SrcReg = getVR(SrcVal, VRBaseMap);
1050 
1051     if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
1052       break;
1053 
1054     BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
1055             DestReg).addReg(SrcReg);
1056     break;
1057   }
1058   case ISD::CopyFromReg: {
1059     unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1060     EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
1061     break;
1062   }
1063   case ISD::EH_LABEL:
1064   case ISD::ANNOTATION_LABEL: {
1065     unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
1066                        ? TargetOpcode::EH_LABEL
1067                        : TargetOpcode::ANNOTATION_LABEL;
1068     MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
1069     BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1070             TII->get(Opc)).addSym(S);
1071     break;
1072   }
1073 
1074   case ISD::LIFETIME_START:
1075   case ISD::LIFETIME_END: {
    unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
                         ? TargetOpcode::LIFETIME_START
                         : TargetOpcode::LIFETIME_END;

    FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Node->getOperand(1));
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
        .addFrameIndex(FI->getIndex());
1082     break;
1083   }
1084 
1085   case ISD::INLINEASM:
1086   case ISD::INLINEASM_BR: {
1087     unsigned NumOps = Node->getNumOperands();
1088     if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1089       --NumOps;  // Ignore the glue operand.
1090 
1091     // Create the inline asm machine instruction.
1092     unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
1093                           ? TargetOpcode::INLINEASM_BR
1094                           : TargetOpcode::INLINEASM;
1095     MachineInstrBuilder MIB =
1096         BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));
1097 
1098     // Add the asm string as an external symbol operand.
1099     SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
1100     const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
1101     MIB.addExternalSymbol(AsmStr);
1102 
1103     // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
1104     // bits.
1105     int64_t ExtraInfo =
1106       cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
1107                           getZExtValue();
1108     MIB.addImm(ExtraInfo);
1109 
    // Remember the operand index of the group flags.
1111     SmallVector<unsigned, 8> GroupIdx;
1112 
1113     // Remember registers that are part of early-clobber defs.
1114     SmallVector<unsigned, 8> ECRegs;
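
    // Operand layout being built here (sketch): after the asm string and the
    // extra-info immediate, operands come in groups of
    //
    //   <flag word> <NumVals operands...>
    //
    // where the flag word encodes the operand kind (def, early-clobber def,
    // clobber, use, imm, mem) and NumVals, as decoded in the loop below.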
1115 
1116     // Add all of the operand registers to the instruction.
1117     for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1118       unsigned Flags =
1119         cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
1120       const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
1121 
1122       GroupIdx.push_back(MIB->getNumOperands());
1123       MIB.addImm(Flags);
1124       ++i;  // Skip the ID value.
1125 
1126       switch (InlineAsm::getKind(Flags)) {
1127       default: llvm_unreachable("Bad flags!");
      case InlineAsm::Kind_RegDef:
1129         for (unsigned j = 0; j != NumVals; ++j, ++i) {
1130           unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1131           // FIXME: Add dead flags for physical and virtual registers defined.
1132           // For now, mark physical register defs as implicit to help fast
1133           // regalloc. This makes inline asm look a lot like calls.
1134           MIB.addReg(Reg,
1135                      RegState::Define |
1136                          getImplRegState(Register::isPhysicalRegister(Reg)));
1137         }
1138         break;
1139       case InlineAsm::Kind_RegDefEarlyClobber:
1140       case InlineAsm::Kind_Clobber:
1141         for (unsigned j = 0; j != NumVals; ++j, ++i) {
1142           unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1143           MIB.addReg(Reg,
1144                      RegState::Define | RegState::EarlyClobber |
1145                          getImplRegState(Register::isPhysicalRegister(Reg)));
1146           ECRegs.push_back(Reg);
1147         }
1148         break;
1149       case InlineAsm::Kind_RegUse:  // Use of register.
1150       case InlineAsm::Kind_Imm:  // Immediate.
1151       case InlineAsm::Kind_Mem:  // Addressing mode.
        // The addressing mode has been selected; just add all of the
        // operands to the machine instruction.
1154         for (unsigned j = 0; j != NumVals; ++j, ++i)
1155           AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
1156                      /*IsDebug=*/false, IsClone, IsCloned);
1157 
1158         // Manually set isTied bits.
1159         if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
1160           unsigned DefGroup = 0;
1161           if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
1162             unsigned DefIdx = GroupIdx[DefGroup] + 1;
1163             unsigned UseIdx = GroupIdx.back() + 1;
1164             for (unsigned j = 0; j != NumVals; ++j)
1165               MIB->tieOperands(DefIdx + j, UseIdx + j);
1166           }
1167         }
1168         break;
1169       }
1170     }
1171 
1172     // GCC inline assembly allows input operands to also be early-clobber
1173     // output operands (so long as the operand is written only after it's
1174     // used), but this does not match the semantics of our early-clobber flag.
1175     // If an early-clobber operand register is also an input operand register,
1176     // then remove the early-clobber flag.
1177     for (unsigned Reg : ECRegs) {
1178       if (MIB->readsRegister(Reg, TRI)) {
1179         MachineOperand *MO =
1180             MIB->findRegisterDefOperand(Reg, false, false, TRI);
1181         assert(MO && "No def operand for clobbered register?");
1182         MO->setIsEarlyClobber(false);
1183       }
1184     }
1185 
1186     // Get the mdnode from the asm if it exists and add it to the instruction.
1187     SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
1188     const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
1189     if (MD)
1190       MIB.addMetadata(MD);
1191 
1192     MBB->insert(InsertPos, MIB);
1193     break;
1194   }
1195   }
1196 }
1197 
1198 /// InstrEmitter - Construct an InstrEmitter and set it to start inserting
1199 /// at the given position in the given block.
1200 InstrEmitter::InstrEmitter(MachineBasicBlock *mbb,
1201                            MachineBasicBlock::iterator insertpos)
1202     : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
1203       TII(MF->getSubtarget().getInstrInfo()),
1204       TRI(MF->getSubtarget().getRegisterInfo()),
1205       TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
1206       InsertPos(insertpos) {}
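
// A minimal usage sketch (hypothetical driver loop; the real caller is the
// SelectionDAG scheduler, which also tracks cloned nodes and phys-reg copies).
// EmitNode, declared in InstrEmitter.h, dispatches to EmitMachineNode or
// EmitSpecialNode above:
//
//   DenseMap<SDValue, Register> VRBaseMap;
//   InstrEmitter Emitter(MBB, MBB->end());
//   for (SDNode *N : NodesInScheduledOrder)
//     Emitter.EmitNode(N, /*IsClone=*/false, /*IsCloned=*/false, VRBaseMap);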
1207