1 //==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the Emit routines for the SelectionDAG class, which creates
10 // MachineInstrs based on the decisions of the SelectionDAG instruction
11 // selection.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "InstrEmitter.h"
16 #include "SDNodeDbgValue.h"
17 #include "llvm/BinaryFormat/Dwarf.h"
18 #include "llvm/CodeGen/MachineConstantPool.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/StackMaps.h"
23 #include "llvm/CodeGen/TargetInstrInfo.h"
24 #include "llvm/CodeGen/TargetLowering.h"
25 #include "llvm/CodeGen/TargetSubtargetInfo.h"
26 #include "llvm/IR/DebugInfoMetadata.h"
27 #include "llvm/IR/PseudoProbe.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Target/TargetMachine.h"
30 using namespace llvm;
31
32 #define DEBUG_TYPE "instr-emitter"
33
/// MinRCSize - Smallest register class we allow when constraining virtual
/// registers. If satisfying all register class constraints would require
/// using a smaller register class, emit a COPY to a new virtual register
/// instead.
constexpr unsigned MinRCSize = 4;
39
40 /// CountResults - The results of target nodes have register or immediate
41 /// operands first, then an optional chain, and optional glue operands (which do
42 /// not go into the resulting MachineInstr).
CountResults(SDNode * Node)43 unsigned InstrEmitter::CountResults(SDNode *Node) {
44 unsigned N = Node->getNumValues();
45 while (N && Node->getValueType(N - 1) == MVT::Glue)
46 --N;
47 if (N && Node->getValueType(N - 1) == MVT::Other)
48 --N; // Skip over chain result.
49 return N;
50 }
51
52 /// countOperands - The inputs to target nodes have any actual inputs first,
53 /// followed by an optional chain operand, then an optional glue operand.
54 /// Compute the number of actual operands that will go into the resulting
55 /// MachineInstr.
56 ///
57 /// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
58 /// the chain and glue. These operands may be implicit on the machine instr.
countOperands(SDNode * Node,unsigned NumExpUses,unsigned & NumImpUses)59 static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
60 unsigned &NumImpUses) {
61 unsigned N = Node->getNumOperands();
62 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
63 --N;
64 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
65 --N; // Ignore chain if it exists.
66
67 // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
68 NumImpUses = N - NumExpUses;
69 for (unsigned I = N; I > NumExpUses; --I) {
70 if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
71 continue;
72 if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
73 if (Register::isPhysicalRegister(RN->getReg()))
74 continue;
75 NumImpUses = N - I;
76 break;
77 }
78
79 return N;
80 }
81
/// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
/// implicit physical register output.
///
/// Maps result \p ResNo of \p Node to a virtual register in \p VRBaseMap,
/// emitting a COPY from \p SrcReg when it is a physical register.
void InstrEmitter::
EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
                Register SrcReg, DenseMap<SDValue, Register> &VRBaseMap) {
  Register VRBase;
  if (SrcReg.isVirtual()) {
    // Just use the input register directly!
    SDValue Op(Node, ResNo);
    // A cloned node may re-map a value that was already mapped; drop the stale
    // entry so the insert below records the fresh mapping.
    if (IsClone)
      VRBaseMap.erase(Op);
    bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
    (void)isNew; // Silence compiler warning.
    assert(isNew && "Node emitted out of order - early");
    return;
  }

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  bool MatchReg = true;
  const TargetRegisterClass *UseRC = nullptr;
  MVT VT = Node->getSimpleValueType(ResNo);

  // Stick to the preferred register classes for legal types.
  if (TLI->isTypeLegal(VT))
    UseRC = TLI->getRegClassFor(VT, Node->isDivergent());

  // Examine each use to (a) find a reusable CopyToReg destination vreg, and
  // (b) narrow UseRC to a class compatible with every machine-instr use.
  // Cloned nodes have multiple copies of their uses, so skip this for them.
  if (!IsClone && !IsCloned)
    for (SDNode *User : Node->uses()) {
      bool Match = true;
      if (User->getOpcode() == ISD::CopyToReg &&
          User->getOperand(2).getNode() == Node &&
          User->getOperand(2).getResNo() == ResNo) {
        Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
        if (DestReg.isVirtual()) {
          VRBase = DestReg;
          Match = false;
        } else if (DestReg != SrcReg)
          Match = false;
      } else {
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          SDValue Op = User->getOperand(i);
          if (Op.getNode() != Node || Op.getResNo() != ResNo)
            continue;
          MVT VT = Node->getSimpleValueType(Op.getResNo());
          if (VT == MVT::Other || VT == MVT::Glue)
            continue;
          Match = false;
          if (User->isMachineOpcode()) {
            const MCInstrDesc &II = TII->get(User->getMachineOpcode());
            const TargetRegisterClass *RC = nullptr;
            if (i+II.getNumDefs() < II.getNumOperands()) {
              RC = TRI->getAllocatableClass(
                  TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
            }
            if (!UseRC)
              UseRC = RC;
            else if (RC) {
              const TargetRegisterClass *ComRC =
                  TRI->getCommonSubClass(UseRC, RC);
              // If multiple uses expect disjoint register classes, we emit
              // copies in AddRegisterOperand.
              if (ComRC)
                UseRC = ComRC;
            }
          }
        }
      }
      // MatchReg stays true only if every use reads SrcReg directly.
      MatchReg &= Match;
      if (VRBase)
        break;
    }

  const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
  SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);

  // Figure out the register class to create for the destreg.
  if (VRBase) {
    DstRC = MRI->getRegClass(VRBase);
  } else if (UseRC) {
    assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
           "Incompatible phys register def and uses!");
    DstRC = UseRC;
  } else
    DstRC = SrcRC;

  // If all uses are reading from the src physical register and copying the
  // register is either impossible or very expensive, then don't create a copy.
  if (MatchReg && SrcRC->getCopyCost() < 0) {
    VRBase = SrcReg;
  } else {
    // Create the reg, emit the copy.
    VRBase = MRI->createVirtualRegister(DstRC);
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
            VRBase).addReg(SrcReg);
  }

  SDValue Op(Node, ResNo);
  if (IsClone)
    VRBaseMap.erase(Op);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}
186
/// CreateVirtualRegisters - Add result-register operands for Node's defs to
/// MIB, creating new virtual registers (or reusing CopyToReg destinations)
/// and recording each result's vreg in VRBaseMap.
void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
                                          MachineInstrBuilder &MIB,
                                          const MCInstrDesc &II,
                                          bool IsClone, bool IsCloned,
                                          DenseMap<SDValue, Register> &VRBaseMap) {
  assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
         "IMPLICIT_DEF should have been handled as a special case elsewhere!");

  unsigned NumResults = CountResults(Node);
  // Variadic instructions whose variadic operands are defs get one vreg per
  // SDNode result rather than per fixed def in the MCInstrDesc.
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
  // STATEPOINT defs are not described by its MCInstrDesc; use the node's
  // result count instead.
  if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
    NumVRegs = NumResults;
  for (unsigned i = 0; i < NumVRegs; ++i) {
    // If the specific node value is only used by a CopyToReg and the dest reg
    // is a vreg in the same register class, use the CopyToReg'd destination
    // register instead of creating a new vreg.
    Register VRBase;
    const TargetRegisterClass *RC =
        TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
    // Always let the value type influence the used register class. The
    // constraints on the instruction may be too lax to represent the value
    // type correctly. For example, a 64-bit float (X86::FR64) can't live in
    // the 32-bit float super-class (X86::FR32).
    if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
      const TargetRegisterClass *VTRC = TLI->getRegClassFor(
          Node->getSimpleValueType(i),
          (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
      if (RC)
        VTRC = TRI->getCommonSubClass(RC, VTRC);
      if (VTRC)
        RC = VTRC;
    }

    if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
      // Optional def must be a physical register.
      VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
      assert(VRBase.isPhysical());
      MIB.addReg(VRBase, RegState::Define);
    }

    // Look for a CopyToReg use whose destination vreg has exactly this
    // register class and reuse it. Skip for cloned nodes (duplicated uses).
    if (!VRBase && !IsClone && !IsCloned)
      for (SDNode *User : Node->uses()) {
        if (User->getOpcode() == ISD::CopyToReg &&
            User->getOperand(2).getNode() == Node &&
            User->getOperand(2).getResNo() == i) {
          unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
          if (Register::isVirtualRegister(Reg)) {
            const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
            if (RegRC == RC) {
              VRBase = Reg;
              MIB.addReg(VRBase, RegState::Define);
              break;
            }
          }
        }
      }

    // Create the result registers for this node and add the result regs to
    // the machine instruction.
    if (VRBase == 0) {
      assert(RC && "Isn't a register operand!");
      VRBase = MRI->createVirtualRegister(RC);
      MIB.addReg(VRBase, RegState::Define);
    }

    // If this def corresponds to a result of the SDNode insert the VRBase into
    // the lookup map.
    if (i < NumResults) {
      SDValue Op(Node, i);
      if (IsClone)
        VRBaseMap.erase(Op);
      bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
    }
  }
}
266
267 /// getVR - Return the virtual register corresponding to the specified result
268 /// of the specified node.
getVR(SDValue Op,DenseMap<SDValue,Register> & VRBaseMap)269 Register InstrEmitter::getVR(SDValue Op,
270 DenseMap<SDValue, Register> &VRBaseMap) {
271 if (Op.isMachineOpcode() &&
272 Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
273 // Add an IMPLICIT_DEF instruction before every use.
274 // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
275 // does not include operand register class info.
276 const TargetRegisterClass *RC = TLI->getRegClassFor(
277 Op.getSimpleValueType(), Op.getNode()->isDivergent());
278 Register VReg = MRI->createVirtualRegister(RC);
279 BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
280 TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
281 return VReg;
282 }
283
284 DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
285 assert(I != VRBaseMap.end() && "Node emitted out of order - late");
286 return I->second;
287 }
288
289
/// AddRegisterOperand - Add the specified register as an operand to the
/// specified machine instr. Insert register copies if the register is
/// not in the required register class.
void
InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
                                 SDValue Op,
                                 unsigned IIOpNum,
                                 const MCInstrDesc *II,
                                 DenseMap<SDValue, Register> &VRBaseMap,
                                 bool IsDebug, bool IsClone, bool IsCloned) {
  assert(Op.getValueType() != MVT::Other &&
         Op.getValueType() != MVT::Glue &&
         "Chain and glue operands should occur at end of operand list!");
  // Get/emit the operand.
  Register VReg = getVR(Op, VRBaseMap);

  const MCInstrDesc &MCID = MIB->getDesc();
  bool isOptDef = IIOpNum < MCID.getNumOperands() &&
                  MCID.OpInfo[IIOpNum].isOptionalDef();

  // If the instruction requires a register in a different class, create
  // a new virtual register and copy the value into it, but first attempt to
  // shrink VReg's register class within reason. For example, if VReg == GR32
  // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
  if (II) {
    const TargetRegisterClass *OpRC = nullptr;
    if (IIOpNum < II->getNumOperands())
      OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);

    if (OpRC) {
      unsigned MinNumRegs = MinRCSize;
      // Don't apply any RC size limit for IMPLICIT_DEF. Each use has a unique
      // virtual register.
      if (Op.isMachineOpcode() &&
          Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF)
        MinNumRegs = 0;

      const TargetRegisterClass *ConstrainedRC
        = MRI->constrainRegClass(VReg, OpRC, MinNumRegs);
      if (!ConstrainedRC) {
        // Constraining failed (class too small or incompatible); fall back to
        // copying the value into a fresh vreg of the required class.
        OpRC = TRI->getAllocatableClass(OpRC);
        assert(OpRC && "Constraints cannot be fulfilled for allocation");
        Register NewVReg = MRI->createVirtualRegister(OpRC);
        BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
                TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
        VReg = NewVReg;
      } else {
        assert(ConstrainedRC->isAllocatable() &&
               "Constraining an allocatable VReg produced an unallocatable class?");
      }
    }
  }

  // If this value has only one use, that use is a kill. This is a
  // conservative approximation. InstrEmitter does trivial coalescing
  // with CopyFromReg nodes, so don't emit kill flags for them.
  // Avoid kill flags on Schedule cloned nodes, since there will be
  // multiple uses.
  // Tied operands are never killed, so we need to check that. And that
  // means we need to determine the index of the operand.
  bool isKill = Op.hasOneUse() &&
                Op.getNode()->getOpcode() != ISD::CopyFromReg &&
                !IsDebug &&
                !(IsClone || IsCloned);
  if (isKill) {
    // Find the index this operand will occupy: skip past any implicit
    // register operands already appended at the end.
    unsigned Idx = MIB->getNumOperands();
    while (Idx > 0 &&
           MIB->getOperand(Idx-1).isReg() &&
           MIB->getOperand(Idx-1).isImplicit())
      --Idx;
    bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
    if (isTied)
      isKill = false;
  }

  MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
             getDebugRegState(IsDebug));
}
368
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding.
///
/// Dispatches on the SDValue's node kind: machine-opcode results and plain
/// values become register operands; the various target constant/address
/// node kinds become the corresponding MachineOperand flavors.
void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
                              SDValue Op,
                              unsigned IIOpNum,
                              const MCInstrDesc *II,
                              DenseMap<SDValue, Register> &VRBaseMap,
                              bool IsDebug, bool IsClone, bool IsCloned) {
  if (Op.isMachineOpcode()) {
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MIB.addImm(C->getSExtValue());
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    MIB.addFPImm(F->getConstantFPValue());
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    Register VReg = R->getReg();
    MVT OpVT = Op.getSimpleValueType();
    // Register class required by the instruction for this operand slot, if
    // the MCInstrDesc specifies one.
    const TargetRegisterClass *IIRC =
        II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
           : nullptr;
    // Preferred class for the operand's value type, when the type is legal.
    const TargetRegisterClass *OpRC =
        TLI->isTypeLegal(OpVT)
            ? TLI->getRegClassFor(OpVT,
                                  Op.getNode()->isDivergent() ||
                                      (IIRC && TRI->isDivergentRegClass(IIRC)))
            : nullptr;

    // If the two classes disagree for a virtual register, copy into a fresh
    // vreg of the instruction's required class.
    if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
      Register NewVReg = MRI->createVirtualRegister(IIRC);
      BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
              TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
      VReg = NewVReg;
    }
    // Turn additional physreg operands into implicit uses on non-variadic
    // instructions. This is used by call and return instructions passing
    // arguments in registers.
    bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
    MIB.addReg(VReg, getImplRegState(Imp));
  } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
    MIB.addRegMask(RM->getRegMask());
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
                         TGA->getTargetFlags());
  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
    MIB.addMBB(BBNode->getBasicBlock());
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MIB.addFrameIndex(FI->getIndex());
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    Align Alignment = CP->getAlign();

    unsigned Idx;
    MachineConstantPool *MCP = MF->getConstantPool();
    // Target-specific constant pool entries use a different index lookup.
    if (CP->isMachineConstantPoolEntry())
      Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
    else
      Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
    MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
  } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
    MIB.addSym(SymNode->getMCSymbol());
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
    MIB.addBlockAddress(BA->getBlockAddress(),
                        BA->getOffset(),
                        BA->getTargetFlags());
  } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
    MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
  } else {
    // Anything else must be a plain value: add it as a register operand.
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Glue &&
           "Chain and glue operands should occur at end of operand list!");
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  }
}
449
ConstrainForSubReg(Register VReg,unsigned SubIdx,MVT VT,bool isDivergent,const DebugLoc & DL)450 Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
451 MVT VT, bool isDivergent, const DebugLoc &DL) {
452 const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
453 const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
454
455 // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
456 // within reason.
457 if (RC && RC != VRC)
458 RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
459
460 // VReg has been adjusted. It can be used with SubIdx operands now.
461 if (RC)
462 return VReg;
463
464 // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
465 // register instead.
466 RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
467 assert(RC && "No legal register class for VT supports that SubIdx");
468 Register NewReg = MRI->createVirtualRegister(RC);
469 BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
470 .addReg(VReg);
471 return NewReg;
472 }
473
/// EmitSubregNode - Generate machine code for subreg nodes.
///
/// Handles EXTRACT_SUBREG (lowered to a COPY with a sub-register source),
/// INSERT_SUBREG and SUBREG_TO_REG (emitted as the corresponding target
/// pseudo instructions), and records the result vreg in VRBaseMap.
void InstrEmitter::EmitSubregNode(SDNode *Node,
                                  DenseMap<SDValue, Register> &VRBaseMap,
                                  bool IsClone, bool IsCloned) {
  Register VRBase;
  unsigned Opc = Node->getMachineOpcode();

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  for (SDNode *User : Node->uses()) {
    if (User->getOpcode() == ISD::CopyToReg &&
        User->getOperand(2).getNode() == Node) {
      Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
      if (DestReg.isVirtual()) {
        VRBase = DestReg;
        break;
      }
    }
  }

  if (Opc == TargetOpcode::EXTRACT_SUBREG) {
    // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
    // constraints on the %dst register, COPY can target all legal register
    // classes.
    unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    const TargetRegisterClass *TRC =
        TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());

    Register Reg;
    MachineInstr *DefMI;
    // The source may be a physical register (RegisterSDNode) or an emitted
    // node value looked up through VRBaseMap.
    RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
    if (R && Register::isPhysicalRegister(R->getReg())) {
      Reg = R->getReg();
      DefMI = nullptr;
    } else {
      Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
      DefMI = MRI->getVRegDef(Reg);
    }

    Register SrcReg, DstReg;
    unsigned DefSubIdx;
    if (DefMI &&
        TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
        SubIdx == DefSubIdx &&
        TRC == MRI->getRegClass(SrcReg)) {
      // Optimize these:
      // r1025 = s/zext r1024, 4
      // r1026 = extract_subreg r1025, 4
      // to a copy
      // r1026 = copy r1024
      VRBase = MRI->createVirtualRegister(TRC);
      BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
              TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
      // SrcReg now has an extra use; its old kill flags may be stale.
      MRI->clearKillFlags(SrcReg);
    } else {
      // Reg may not support a SubIdx sub-register, and we may need to
      // constrain its register class or issue a COPY to a compatible register
      // class.
      if (Reg.isVirtual())
        Reg = ConstrainForSubReg(Reg, SubIdx,
                                 Node->getOperand(0).getSimpleValueType(),
                                 Node->isDivergent(), Node->getDebugLoc());
      // Create the destreg if it is missing.
      if (!VRBase)
        VRBase = MRI->createVirtualRegister(TRC);

      // Create the extract_subreg machine instruction.
      MachineInstrBuilder CopyMI =
          BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
                  TII->get(TargetOpcode::COPY), VRBase);
      // Virtual registers take the sub-index on the operand; physical
      // registers are resolved to the concrete sub-register here.
      if (Reg.isVirtual())
        CopyMI.addReg(Reg, 0, SubIdx);
      else
        CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
    }
  } else if (Opc == TargetOpcode::INSERT_SUBREG ||
             Opc == TargetOpcode::SUBREG_TO_REG) {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();

    // Figure out the register class to create for the destreg. It should be
    // the largest legal register class supporting SubIdx sub-registers.
    // RegisterCoalescer will constrain it further if it decides to eliminate
    // the INSERT_SUBREG instruction.
    //
    // %dst = INSERT_SUBREG %src, %sub, SubIdx
    //
    // is lowered by TwoAddressInstructionPass to:
    //
    // %dst = COPY %src
    // %dst:SubIdx = COPY %sub
    //
    // There is no constraint on the %src register class.
    //
    const TargetRegisterClass *SRC =
        TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
    SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
    assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");

    // A CopyToReg destination found above is only reusable if its class is
    // compatible with SRC.
    if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
      VRBase = MRI->createVirtualRegister(SRC);

    // Create the insert_subreg or subreg_to_reg machine instruction.
    MachineInstrBuilder MIB =
        BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);

    // If creating a subreg_to_reg, then the first input operand
    // is an implicit value immediate, otherwise it's a register
    if (Opc == TargetOpcode::SUBREG_TO_REG) {
      const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
      MIB.addImm(SD->getZExtValue());
    } else
      AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
                 IsClone, IsCloned);
    // Add the subregister being inserted
    AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
    MIB.addImm(SubIdx);
    MBB->insert(InsertPos, MIB);
  } else
    llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");

  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}
604
605 /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
606 /// COPY_TO_REGCLASS is just a normal copy, except that the destination
607 /// register is constrained to be in a particular register class.
608 ///
609 void
EmitCopyToRegClassNode(SDNode * Node,DenseMap<SDValue,Register> & VRBaseMap)610 InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
611 DenseMap<SDValue, Register> &VRBaseMap) {
612 unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
613
614 // Create the new VReg in the destination class and emit a copy.
615 unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
616 const TargetRegisterClass *DstRC =
617 TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
618 Register NewVReg = MRI->createVirtualRegister(DstRC);
619 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
620 NewVReg).addReg(VReg);
621
622 SDValue Op(Node, 0);
623 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
624 (void)isNew; // Silence compiler warning.
625 assert(isNew && "Node emitted out of order - early");
626 }
627
/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
///
/// Operand 0 is the destination register-class ID; the remaining operands
/// are (value, subreg-index) pairs. While walking the pairs, the destination
/// class may be widened to a super-class that matches each source register.
void InstrEmitter::EmitRegSequence(SDNode *Node,
                                   DenseMap<SDValue, Register> &VRBaseMap,
                                   bool IsClone, bool IsCloned) {
  unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
  const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
  Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
  const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
  unsigned NumOps = Node->getNumOperands();
  // If the input pattern has a chain, then the root of the corresponding
  // output pattern will get a chain as well. This can happen to be a
  // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
  if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    --NumOps; // Ignore chain if it exists.

  // Odd total: one RC-id operand plus an even number of (value, subidx) pairs.
  assert((NumOps & 1) == 1 &&
         "REG_SEQUENCE must have an odd number of operands!");
  for (unsigned i = 1; i != NumOps; ++i) {
    SDValue Op = Node->getOperand(i);
    // Even positions hold the sub-register index for the value at i-1.
    if ((i & 1) == 0) {
      RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
      // Skip physical registers as they don't have a vreg to get and we'll
      // insert copies for them in TwoAddressInstructionPass anyway.
      if (!R || !Register::isPhysicalRegister(R->getReg())) {
        unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
        unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
        const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
        // Widen the result class if needed so SubReg fits at SubIdx.
        const TargetRegisterClass *SRC =
            TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
        if (SRC && SRC != RC) {
          MRI->setRegClass(NewVReg, SRC);
          RC = SRC;
        }
      }
    }
    AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
  }

  MBB->insert(InsertPos, MIB);
  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}
675
/// EmitDbgValue - Generate machine instruction for a dbg_value node.
///
/// Chooses between DBG_VALUE_LIST (variadic), DBG_INSTR_REF (when
/// instruction referencing is enabled), a plain DBG_VALUE, or an empty
/// location when the value has been invalidated.
MachineInstr *
InstrEmitter::EmitDbgValue(SDDbgValue *SD,
                           DenseMap<SDValue, Register> &VRBaseMap) {
  MDNode *Var = SD->getVariable();
  MDNode *Expr = SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");

  SD->setIsEmitted();

  ArrayRef<SDDbgOperand> LocationOps = SD->getLocationOps();
  assert(!LocationOps.empty() && "dbg_value with no location operands?");

  // An invalidated SDDbgValue means the value is no longer available; emit
  // an explicit "no location" so the variable reads as optimized-out.
  if (SD->isInvalidated())
    return EmitDbgNoLocation(SD);

  // Emit variadic dbg_value nodes as DBG_VALUE_LIST.
  if (SD->isVariadic()) {
    // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)*
    const MCInstrDesc &DbgValDesc = TII->get(TargetOpcode::DBG_VALUE_LIST);
    // Build the DBG_VALUE_LIST instruction base.
    auto MIB = BuildMI(*MF, DL, DbgValDesc);
    MIB.addMetadata(Var);
    MIB.addMetadata(Expr);
    AddDbgValueLocationOps(MIB, DbgValDesc, LocationOps, VRBaseMap);
    return &*MIB;
  }

  // Attempt to produce a DBG_INSTR_REF if we've been asked to.
  // We currently exclude the possibility of instruction references for
  // variadic nodes; if at some point we enable them, this should be moved
  // above the variadic block.
  if (EmitDebugInstrRefs)
    if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
      return InstrRef;

  return EmitDbgValueFromSingleOp(SD, VRBaseMap);
}
717
AddDbgValueLocationOps(MachineInstrBuilder & MIB,const MCInstrDesc & DbgValDesc,ArrayRef<SDDbgOperand> LocationOps,DenseMap<SDValue,Register> & VRBaseMap)718 void InstrEmitter::AddDbgValueLocationOps(
719 MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc,
720 ArrayRef<SDDbgOperand> LocationOps,
721 DenseMap<SDValue, Register> &VRBaseMap) {
722 for (const SDDbgOperand &Op : LocationOps) {
723 switch (Op.getKind()) {
724 case SDDbgOperand::FRAMEIX:
725 MIB.addFrameIndex(Op.getFrameIx());
726 break;
727 case SDDbgOperand::VREG:
728 MIB.addReg(Op.getVReg());
729 break;
730 case SDDbgOperand::SDNODE: {
731 SDValue V = SDValue(Op.getSDNode(), Op.getResNo());
732 // It's possible we replaced this SDNode with other(s) and therefore
733 // didn't generate code for it. It's better to catch these cases where
734 // they happen and transfer the debug info, but trying to guarantee that
735 // in all cases would be very fragile; this is a safeguard for any
736 // that were missed.
737 if (VRBaseMap.count(V) == 0)
738 MIB.addReg(0U); // undef
739 else
740 AddOperand(MIB, V, (*MIB).getNumOperands(), &DbgValDesc, VRBaseMap,
741 /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
742 } break;
743 case SDDbgOperand::CONST: {
744 const Value *V = Op.getConst();
745 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
746 if (CI->getBitWidth() > 64)
747 MIB.addCImm(CI);
748 else
749 MIB.addImm(CI->getSExtValue());
750 } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
751 MIB.addFPImm(CF);
752 } else if (isa<ConstantPointerNull>(V)) {
753 // Note: This assumes that all nullptr constants are zero-valued.
754 MIB.addImm(0);
755 } else {
756 // Could be an Undef. In any case insert an Undef so we can see what we
757 // dropped.
758 MIB.addReg(0U);
759 }
760 } break;
761 }
762 }
763 }
764
765 MachineInstr *
EmitDbgInstrRef(SDDbgValue * SD,DenseMap<SDValue,Register> & VRBaseMap)766 InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
767 DenseMap<SDValue, Register> &VRBaseMap) {
768 assert(!SD->isVariadic());
769 SDDbgOperand DbgOperand = SD->getLocationOps()[0];
770 MDNode *Var = SD->getVariable();
771 DIExpression *Expr = (DIExpression*)SD->getExpression();
772 DebugLoc DL = SD->getDebugLoc();
773 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);
774
775 // Handle variable locations that don't actually depend on the instructions
776 // in the program: constants and stack locations.
777 if (DbgOperand.getKind() == SDDbgOperand::FRAMEIX ||
778 DbgOperand.getKind() == SDDbgOperand::CONST)
779 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
780
781 // Immediately fold any indirectness from the LLVM-IR intrinsic into the
782 // expression:
783 if (SD->isIndirect()) {
784 std::vector<uint64_t> Elts = {dwarf::DW_OP_deref};
785 Expr = DIExpression::append(Expr, Elts);
786 }
787
788 // It may not be immediately possible to identify the MachineInstr that
789 // defines a VReg, it can depend for example on the order blocks are
790 // emitted in. When this happens, or when further analysis is needed later,
791 // produce an instruction like this:
792 //
793 // DBG_INSTR_REF %0:gr64, 0, !123, !456
794 //
795 // i.e., point the instruction at the vreg, and patch it up later in
796 // MachineFunction::finalizeDebugInstrRefs.
797 auto EmitHalfDoneInstrRef = [&](unsigned VReg) -> MachineInstr * {
798 auto MIB = BuildMI(*MF, DL, RefII);
799 MIB.addReg(VReg);
800 MIB.addImm(0);
801 MIB.addMetadata(Var);
802 MIB.addMetadata(Expr);
803 return MIB;
804 };
805
806 // Try to find both the defined register and the instruction defining it.
807 MachineInstr *DefMI = nullptr;
808 unsigned VReg;
809
810 if (DbgOperand.getKind() == SDDbgOperand::VREG) {
811 VReg = DbgOperand.getVReg();
812
813 // No definition means that block hasn't been emitted yet. Leave a vreg
814 // reference to be fixed later.
815 if (!MRI->hasOneDef(VReg))
816 return EmitHalfDoneInstrRef(VReg);
817
818 DefMI = &*MRI->def_instr_begin(VReg);
819 } else {
820 assert(DbgOperand.getKind() == SDDbgOperand::SDNODE);
821 // Look up the corresponding VReg for the given SDNode, if any.
822 SDNode *Node = DbgOperand.getSDNode();
823 SDValue Op = SDValue(Node, DbgOperand.getResNo());
824 DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
825 // No VReg -> produce a DBG_VALUE $noreg instead.
826 if (I==VRBaseMap.end())
827 return EmitDbgNoLocation(SD);
828
829 // Try to pick out a defining instruction at this point.
830 VReg = getVR(Op, VRBaseMap);
831
832 // Again, if there's no instruction defining the VReg right now, fix it up
833 // later.
834 if (!MRI->hasOneDef(VReg))
835 return EmitHalfDoneInstrRef(VReg);
836
837 DefMI = &*MRI->def_instr_begin(VReg);
838 }
839
840 // Avoid copy like instructions: they don't define values, only move them.
841 // Leave a virtual-register reference until it can be fixed up later, to find
842 // the underlying value definition.
843 if (DefMI->isCopyLike() || TII->isCopyInstr(*DefMI))
844 return EmitHalfDoneInstrRef(VReg);
845
846 auto MIB = BuildMI(*MF, DL, RefII);
847
848 // Find the operand number which defines the specified VReg.
849 unsigned OperandIdx = 0;
850 for (const auto &MO : DefMI->operands()) {
851 if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
852 break;
853 ++OperandIdx;
854 }
855 assert(OperandIdx < DefMI->getNumOperands());
856
857 // Make the DBG_INSTR_REF refer to that instruction, and that operand.
858 unsigned InstrNum = DefMI->getDebugInstrNum();
859 MIB.addImm(InstrNum);
860 MIB.addImm(OperandIdx);
861 MIB.addMetadata(Var);
862 MIB.addMetadata(Expr);
863 return &*MIB;
864 }
865
EmitDbgNoLocation(SDDbgValue * SD)866 MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) {
867 // An invalidated SDNode must generate an undef DBG_VALUE: although the
868 // original value is no longer computed, earlier DBG_VALUEs live ranges
869 // must not leak into later code.
870 MDNode *Var = SD->getVariable();
871 MDNode *Expr = SD->getExpression();
872 DebugLoc DL = SD->getDebugLoc();
873 auto MIB = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE));
874 MIB.addReg(0U);
875 MIB.addReg(0U);
876 MIB.addMetadata(Var);
877 MIB.addMetadata(Expr);
878 return &*MIB;
879 }
880
881 MachineInstr *
EmitDbgValueFromSingleOp(SDDbgValue * SD,DenseMap<SDValue,Register> & VRBaseMap)882 InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD,
883 DenseMap<SDValue, Register> &VRBaseMap) {
884 MDNode *Var = SD->getVariable();
885 DIExpression *Expr = SD->getExpression();
886 DebugLoc DL = SD->getDebugLoc();
887 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
888
889 assert(SD->getLocationOps().size() == 1 &&
890 "Non variadic dbg_value should have only one location op");
891
892 // See about constant-folding the expression.
893 // Copy the location operand in case we replace it.
894 SmallVector<SDDbgOperand, 1> LocationOps(1, SD->getLocationOps()[0]);
895 if (Expr && LocationOps[0].getKind() == SDDbgOperand::CONST) {
896 const Value *V = LocationOps[0].getConst();
897 if (auto *C = dyn_cast<ConstantInt>(V)) {
898 std::tie(Expr, C) = Expr->constantFold(C);
899 LocationOps[0] = SDDbgOperand::fromConst(C);
900 }
901 }
902
903 // Emit non-variadic dbg_value nodes as DBG_VALUE.
904 // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr
905 auto MIB = BuildMI(*MF, DL, II);
906 AddDbgValueLocationOps(MIB, II, LocationOps, VRBaseMap);
907
908 if (SD->isIndirect())
909 MIB.addImm(0U);
910 else
911 MIB.addReg(0U);
912
913 return MIB.addMetadata(Var).addMetadata(Expr);
914 }
915
916 MachineInstr *
EmitDbgLabel(SDDbgLabel * SD)917 InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
918 MDNode *Label = SD->getLabel();
919 DebugLoc DL = SD->getDebugLoc();
920 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
921 "Expected inlined-at fields to agree");
922
923 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL);
924 MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
925 MIB.addMetadata(Label);
926
927 return &*MIB;
928 }
929
/// EmitMachineNode - Generate machine code for a target-specific node and
/// needed dependencies.
///
/// Pseudo-opcodes (subreg ops, COPY_TO_REGCLASS, REG_SEQUENCE, IMPLICIT_DEF)
/// are dispatched to dedicated emitters; everything else is built here:
/// result vregs first, then the node's operands, scratch registers,
/// memory references, and finally bookkeeping for physreg defs reached
/// through the glue chain.
void InstrEmitter::
EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, Register> &VRBaseMap) {
  unsigned Opc = Node->getMachineOpcode();

  // Handle subreg insert/extract specially
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::INSERT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG) {
    EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  // Handle COPY_TO_REGCLASS specially.
  if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
    EmitCopyToRegClassNode(Node, VRBaseMap);
    return;
  }

  // Handle REG_SEQUENCE specially.
  if (Opc == TargetOpcode::REG_SEQUENCE) {
    EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  if (Opc == TargetOpcode::IMPLICIT_DEF)
    // We want a unique VR for each IMPLICIT_DEF use.
    return;

  const MCInstrDesc &II = TII->get(Opc);
  unsigned NumResults = CountResults(Node);
  unsigned NumDefs = II.getNumDefs();
  const MCPhysReg *ScratchRegs = nullptr;

  // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    // Stackmaps do not have arguments and do not preserve their calling
    // convention. However, to simplify runtime support, they clobber the same
    // scratch registers as AnyRegCC.
    unsigned CC = CallingConv::AnyReg;
    if (Opc == TargetOpcode::PATCHPOINT) {
      CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
      // Treat every patchpoint result as an explicit def, so none are
      // handled as implicit physreg outs below.
      NumDefs = NumResults;
    }
    ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
  } else if (Opc == TargetOpcode::STATEPOINT) {
    // Same for statepoints: all results are explicit defs.
    NumDefs = NumResults;
  }

  unsigned NumImpUses = 0;
  unsigned NodeOperands =
    countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  // Results beyond the declared defs must be implicit physreg defs.
  bool HasPhysRegOuts = NumResults > NumDefs &&
                        II.getImplicitDefs() != nullptr && !HasVRegVariadicDefs;
#ifndef NDEBUG
  unsigned NumMIOperands = NodeOperands + NumResults;
  if (II.isVariadic())
    assert(NumMIOperands >= II.getNumOperands() &&
           "Too few operands for a variadic node!");
  else
    assert(NumMIOperands >= II.getNumOperands() &&
           NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
                                NumImpUses &&
           "#operands for dag node doesn't match .td file!");
#endif

  // Create the new machine instruction.
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);

  // Add result register values for things that are defined by this
  // instruction.
  if (NumResults) {
    CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);

    // Transfer any IR flags from the SDNode to the MachineInstr
    MachineInstr *MI = MIB.getInstr();
    const SDNodeFlags Flags = Node->getFlags();
    if (Flags.hasNoSignedZeros())
      MI->setFlag(MachineInstr::MIFlag::FmNsz);

    if (Flags.hasAllowReciprocal())
      MI->setFlag(MachineInstr::MIFlag::FmArcp);

    if (Flags.hasNoNaNs())
      MI->setFlag(MachineInstr::MIFlag::FmNoNans);

    if (Flags.hasNoInfs())
      MI->setFlag(MachineInstr::MIFlag::FmNoInfs);

    if (Flags.hasAllowContract())
      MI->setFlag(MachineInstr::MIFlag::FmContract);

    if (Flags.hasApproximateFuncs())
      MI->setFlag(MachineInstr::MIFlag::FmAfn);

    if (Flags.hasAllowReassociation())
      MI->setFlag(MachineInstr::MIFlag::FmReassoc);

    if (Flags.hasNoUnsignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoUWrap);

    if (Flags.hasNoSignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoSWrap);

    if (Flags.hasExact())
      MI->setFlag(MachineInstr::MIFlag::IsExact);

    if (Flags.hasNoFPExcept())
      MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
  }

  // Emit all of the actual operands of this instruction, adding them to the
  // instruction as appropriate.
  bool HasOptPRefs = NumDefs > NumResults;
  assert((!HasOptPRefs || !HasPhysRegOuts) &&
         "Unable to cope with optional defs and phys regs defs!");
  unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
  for (unsigned i = NumSkip; i != NodeOperands; ++i)
    AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
               VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);

  // Add scratch registers as implicit def and early clobber
  if (ScratchRegs)
    for (unsigned i = 0; ScratchRegs[i]; ++i)
      MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
                                 RegState::EarlyClobber);

  // Set the memory reference descriptions of this instruction now that it is
  // part of the function.
  MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());

  // Insert the instruction into position in the block. This needs to
  // happen before any custom inserter hook is called so that the
  // hook knows where in the block to insert the replacement code.
  MBB->insert(InsertPos, MIB);

  // The MachineInstr may also define physregs instead of virtregs.  These
  // physreg values can reach other instructions in different ways:
  //
  // 1. When there is a use of a Node value beyond the explicitly defined
  //    virtual registers, we emit a CopyFromReg for one of the implicitly
  //    defined physregs.  This only happens when HasPhysRegOuts is true.
  //
  // 2. A CopyFromReg reading a physreg may be glued to this instruction.
  //
  // 3. A glued instruction may implicitly use a physreg.
  //
  // 4. A glued instruction may use a RegisterSDNode operand.
  //
  // Collect all the used physreg defs, and make sure that any unused physreg
  // defs are marked as dead.
  SmallVector<Register, 8> UsedRegs;

  // Additional results must be physical register defs.
  if (HasPhysRegOuts) {
    for (unsigned i = NumDefs; i < NumResults; ++i) {
      Register Reg = II.getImplicitDefs()[i - NumDefs];
      if (!Node->hasAnyUseOfValue(i))
        continue;
      // This implicitly defined physreg has a use.
      UsedRegs.push_back(Reg);
      EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
    }
  }

  // Scan the glue chain for any used physregs.
  if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
    for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
      if (F->getOpcode() == ISD::CopyFromReg) {
        UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
        continue;
      } else if (F->getOpcode() == ISD::CopyToReg) {
        // Skip CopyToReg nodes that are internal to the glue chain.
        continue;
      }
      // Collect declared implicit uses.
      const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
      UsedRegs.append(MCID.getImplicitUses(),
                      MCID.getImplicitUses() + MCID.getNumImplicitUses());
      // In addition to declared implicit uses, we must also check for
      // direct RegisterSDNode operands.
      for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
        if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
          Register Reg = R->getReg();
          if (Reg.isPhysical())
            UsedRegs.push_back(Reg);
        }
    }
  }

  // Finally mark unused registers as dead.
  if (!UsedRegs.empty() || II.getImplicitDefs() || II.hasOptionalDef())
    MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);

  // STATEPOINT is too 'dynamic' to have meaningful machine description.
  // We have to manually tie operands.
  if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
    assert(!HasPhysRegOuts && "STATEPOINT mishandled");
    MachineInstr *MI = MIB;
    unsigned Def = 0;
    int First = StatepointOpers(MI).getFirstGCPtrIdx();
    assert(First > 0 && "Statepoint has Defs but no GC ptr list");
    unsigned Use = (unsigned)First;
    // Pair each def with the next register operand in the GC pointer list.
    while (Def < NumDefs) {
      if (MI->getOperand(Use).isReg())
        MI->tieOperands(Def++, Use);
      Use = StackMaps::getNextMetaArgIdx(MI, Use);
    }
  }

  // Run post-isel target hook to adjust this instruction if needed.
  if (II.hasPostISelHook())
    TLI->AdjustInstrPostInstrSelection(*MIB, Node);
}
1149
/// EmitSpecialNode - Generate machine code for a target-independent node and
/// needed dependencies.
///
/// Handles the handful of target-independent opcodes that survive
/// instruction selection: copies, labels, lifetime markers, pseudo probes,
/// and inline assembly.
void InstrEmitter::
EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, Register> &VRBaseMap) {
  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    Node->dump();
#endif
    llvm_unreachable("This target-independent node should have been selected!");
  case ISD::EntryToken:
    llvm_unreachable("EntryToken should have been excluded from the schedule!");
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor: // fall thru
    break;
  case ISD::CopyToReg: {
    Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    SDValue SrcVal = Node->getOperand(2);
    if (Register::isVirtualRegister(DestReg) && SrcVal.isMachineOpcode() &&
        SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
      // Instead building a COPY to that vreg destination, build an
      // IMPLICIT_DEF instruction instead.
      BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
              TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
      break;
    }
    // The source is either a register node or a value with a vreg assigned
    // earlier in emission.
    Register SrcReg;
    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
      SrcReg = R->getReg();
    else
      SrcReg = getVR(SrcVal, VRBaseMap);

    if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
      break;

    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
            DestReg).addReg(SrcReg);
    break;
  }
  case ISD::CopyFromReg: {
    unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
    break;
  }
  case ISD::EH_LABEL:
  case ISD::ANNOTATION_LABEL: {
    unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
                       ? TargetOpcode::EH_LABEL
                       : TargetOpcode::ANNOTATION_LABEL;
    MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
            TII->get(Opc)).addSym(S);
    break;
  }

  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END: {
    unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
                         ? TargetOpcode::LIFETIME_START
                         : TargetOpcode::LIFETIME_END;
    // Operand 1 is the frame index whose lifetime is being marked.
    auto *FI = cast<FrameIndexSDNode>(Node->getOperand(1));
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
        .addFrameIndex(FI->getIndex());
    break;
  }

  case ISD::PSEUDO_PROBE: {
    unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
    auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
    auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
    auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();

    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
        .addImm(Guid)
        .addImm(Index)
        .addImm((uint8_t)PseudoProbeType::Block)
        .addImm(Attr);
    break;
  }

  case ISD::INLINEASM:
  case ISD::INLINEASM_BR: {
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
      --NumOps;  // Ignore the glue operand.

    // Create the inline asm machine instruction.
    unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
                          ? TargetOpcode::INLINEASM_BR
                          : TargetOpcode::INLINEASM;
    MachineInstrBuilder MIB =
        BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));

    // Add the asm string as an external symbol operand.
    SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
    const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
    MIB.addExternalSymbol(AsmStr);

    // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
    // bits.
    int64_t ExtraInfo =
      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
                          getZExtValue();
    MIB.addImm(ExtraInfo);

    // Remember to operand index of the group flags.
    SmallVector<unsigned, 8> GroupIdx;

    // Remember registers that are part of early-clobber defs.
    SmallVector<unsigned, 8> ECRegs;

    // Add all of the operand registers to the instruction. Each operand
    // group is a flags word followed by its registers/values.
    for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
      unsigned Flags =
        cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
      const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

      GroupIdx.push_back(MIB->getNumOperands());
      MIB.addImm(Flags);
      ++i;  // Skip the ID value.

      switch (InlineAsm::getKind(Flags)) {
      default: llvm_unreachable("Bad flags!");
        case InlineAsm::Kind_RegDef:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          // FIXME: Add dead flags for physical and virtual registers defined.
          // For now, mark physical register defs as implicit to help fast
          // regalloc. This makes inline asm look a lot like calls.
          MIB.addReg(Reg,
                     RegState::Define |
                         getImplRegState(Register::isPhysicalRegister(Reg)));
        }
        break;
      case InlineAsm::Kind_RegDefEarlyClobber:
      case InlineAsm::Kind_Clobber:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MIB.addReg(Reg,
                     RegState::Define | RegState::EarlyClobber |
                         getImplRegState(Register::isPhysicalRegister(Reg)));
          ECRegs.push_back(Reg);
        }
        break;
      case InlineAsm::Kind_RegUse:  // Use of register.
      case InlineAsm::Kind_Imm:  // Immediate.
      case InlineAsm::Kind_Mem:  // Addressing mode.
        // The addressing mode has been selected, just add all of the
        // operands to the machine instruction.
        for (unsigned j = 0; j != NumVals; ++j, ++i)
          AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
                     /*IsDebug=*/false, IsClone, IsCloned);

        // Manually set isTied bits.
        if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
          unsigned DefGroup = 0;
          if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
            // +1 skips the group's flags operand to reach its registers.
            unsigned DefIdx = GroupIdx[DefGroup] + 1;
            unsigned UseIdx = GroupIdx.back() + 1;
            for (unsigned j = 0; j != NumVals; ++j)
              MIB->tieOperands(DefIdx + j, UseIdx + j);
          }
        }
        break;
      }
    }

    // GCC inline assembly allows input operands to also be early-clobber
    // output operands (so long as the operand is written only after it's
    // used), but this does not match the semantics of our early-clobber flag.
    // If an early-clobber operand register is also an input operand register,
    // then remove the early-clobber flag.
    for (unsigned Reg : ECRegs) {
      if (MIB->readsRegister(Reg, TRI)) {
        MachineOperand *MO =
            MIB->findRegisterDefOperand(Reg, false, false, TRI);
        assert(MO && "No def operand for clobbered register?");
        MO->setIsEarlyClobber(false);
      }
    }

    // Get the mdnode from the asm if it exists and add it to the instruction.
    SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
    const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
    if (MD)
      MIB.addMetadata(MD);

    MBB->insert(InsertPos, MIB);
    break;
  }
  }
}
1343
/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
/// at the given position in the given block.
InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
                           MachineBasicBlock::iterator insertpos,
                           bool UseInstrRefDebugInfo)
    : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
      TII(MF->getSubtarget().getInstrInfo()),
      TRI(MF->getSubtarget().getRegisterInfo()),
      TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
      InsertPos(insertpos) {
  // Whether to emit DBG_INSTR_REF (instruction-referencing) variable
  // locations instead of plain DBG_VALUEs; consulted by the EmitDbg* paths.
  EmitDebugInstrRefs = UseInstrRefDebugInfo;
}
1356