1 //==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This implements the Emit routines for the SelectionDAG class, which creates 10 // MachineInstrs based on the decisions of the SelectionDAG instruction 11 // selection. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "InstrEmitter.h" 16 #include "SDNodeDbgValue.h" 17 #include "llvm/ADT/Statistic.h" 18 #include "llvm/CodeGen/MachineConstantPool.h" 19 #include "llvm/CodeGen/MachineFunction.h" 20 #include "llvm/CodeGen/MachineInstrBuilder.h" 21 #include "llvm/CodeGen/MachineRegisterInfo.h" 22 #include "llvm/CodeGen/SelectionDAG.h" 23 #include "llvm/CodeGen/StackMaps.h" 24 #include "llvm/CodeGen/TargetInstrInfo.h" 25 #include "llvm/CodeGen/TargetLowering.h" 26 #include "llvm/CodeGen/TargetSubtargetInfo.h" 27 #include "llvm/IR/DataLayout.h" 28 #include "llvm/IR/DebugInfo.h" 29 #include "llvm/Support/Debug.h" 30 #include "llvm/Support/ErrorHandling.h" 31 #include "llvm/Support/MathExtras.h" 32 #include "llvm/Target/TargetMachine.h" 33 using namespace llvm; 34 35 #define DEBUG_TYPE "instr-emitter" 36 37 /// MinRCSize - Smallest register class we allow when constraining virtual 38 /// registers. If satisfying all register class constraints would require 39 /// using a smaller register class, emit a COPY to a new virtual register 40 /// instead. 41 const unsigned MinRCSize = 4; 42 43 /// CountResults - The results of target nodes have register or immediate 44 /// operands first, then an optional chain, and optional glue operands (which do 45 /// not go into the resulting MachineInstr). 
46 unsigned InstrEmitter::CountResults(SDNode *Node) { 47 unsigned N = Node->getNumValues(); 48 while (N && Node->getValueType(N - 1) == MVT::Glue) 49 --N; 50 if (N && Node->getValueType(N - 1) == MVT::Other) 51 --N; // Skip over chain result. 52 return N; 53 } 54 55 /// countOperands - The inputs to target nodes have any actual inputs first, 56 /// followed by an optional chain operand, then an optional glue operand. 57 /// Compute the number of actual operands that will go into the resulting 58 /// MachineInstr. 59 /// 60 /// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding 61 /// the chain and glue. These operands may be implicit on the machine instr. 62 static unsigned countOperands(SDNode *Node, unsigned NumExpUses, 63 unsigned &NumImpUses) { 64 unsigned N = Node->getNumOperands(); 65 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) 66 --N; 67 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other) 68 --N; // Ignore chain if it exists. 69 70 // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses. 71 NumImpUses = N - NumExpUses; 72 for (unsigned I = N; I > NumExpUses; --I) { 73 if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1))) 74 continue; 75 if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1))) 76 if (Register::isPhysicalRegister(RN->getReg())) 77 continue; 78 NumImpUses = N - I; 79 break; 80 } 81 82 return N; 83 } 84 85 /// Return starting index of GC operand list. 86 // FIXME: need a better place for this. Put it in StackMaps? 
static unsigned getStatepointGCArgStartIdx(MachineInstr *MI) {
  assert(MI->getOpcode() == TargetOpcode::STATEPOINT &&
         "STATEPOINT node expected");
  unsigned OperIdx = StatepointOpers(MI).getNumDeoptArgsIdx();
  // The deopt-argument count is stored as an immediate; the deopt args
  // themselves follow it.
  unsigned NumDeopts = MI->getOperand(OperIdx).getImm();
  ++OperIdx;
  // Each deopt argument may span several MI operands, so step over them one
  // logical (meta) argument at a time.
  while (NumDeopts--)
    OperIdx = StackMaps::getNextMetaArgIdx(MI, OperIdx);
  return OperIdx;
}

/// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
/// implicit physical register output.
/// If SrcReg is already virtual, result ResNo is simply mapped to it in
/// VRBaseMap. Otherwise a destination vreg is chosen (possibly reusing a
/// CopyToReg user's vreg) and a COPY from the physreg is emitted.
void InstrEmitter::
EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
                Register SrcReg, DenseMap<SDValue, Register> &VRBaseMap) {
  Register VRBase;
  if (SrcReg.isVirtual()) {
    // Just use the input register directly!
    SDValue Op(Node, ResNo);
    if (IsClone)
      VRBaseMap.erase(Op);
    bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
    (void)isNew; // Silence compiler warning.
    assert(isNew && "Node emitted out of order - early");
    return;
  }

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  bool MatchReg = true;
  const TargetRegisterClass *UseRC = nullptr;
  MVT VT = Node->getSimpleValueType(ResNo);

  // Stick to the preferred register classes for legal types.
  if (TLI->isTypeLegal(VT))
    UseRC = TLI->getRegClassFor(VT, Node->isDivergent());

  // Scan the users to (a) find a CopyToReg destination vreg we can reuse and
  // (b) narrow UseRC to a class acceptable to every machine-instr use.
  if (!IsClone && !IsCloned)
    for (SDNode *User : Node->uses()) {
      bool Match = true;
      if (User->getOpcode() == ISD::CopyToReg &&
          User->getOperand(2).getNode() == Node &&
          User->getOperand(2).getResNo() == ResNo) {
        Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
        if (DestReg.isVirtual()) {
          VRBase = DestReg;
          Match = false;
        } else if (DestReg != SrcReg)
          Match = false;
      } else {
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          SDValue Op = User->getOperand(i);
          if (Op.getNode() != Node || Op.getResNo() != ResNo)
            continue;
          MVT VT = Node->getSimpleValueType(Op.getResNo());
          if (VT == MVT::Other || VT == MVT::Glue)
            continue;
          Match = false;
          if (User->isMachineOpcode()) {
            const MCInstrDesc &II = TII->get(User->getMachineOpcode());
            const TargetRegisterClass *RC = nullptr;
            if (i+II.getNumDefs() < II.getNumOperands()) {
              RC = TRI->getAllocatableClass(
                  TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
            }
            if (!UseRC)
              UseRC = RC;
            else if (RC) {
              const TargetRegisterClass *ComRC =
                  TRI->getCommonSubClass(UseRC, RC);
              // If multiple uses expect disjoint register classes, we emit
              // copies in AddRegisterOperand.
              if (ComRC)
                UseRC = ComRC;
            }
          }
        }
      }
      // MatchReg stays set only while every use can read SrcReg in place.
      MatchReg &= Match;
      if (VRBase)
        break;
    }

  const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
  SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);

  // Figure out the register class to create for the destreg.
  if (VRBase) {
    // Reuse the CopyToReg destination's class.
    DstRC = MRI->getRegClass(VRBase);
  } else if (UseRC) {
    assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
           "Incompatible phys register def and uses!");
    DstRC = UseRC;
  } else {
    DstRC = TLI->getRegClassFor(VT, Node->isDivergent());
  }

  // If all uses are reading from the src physical register and copying the
  // register is either impossible or very expensive, then don't create a copy.
  if (MatchReg && SrcRC->getCopyCost() < 0) {
    VRBase = SrcReg;
  } else {
    // Create the reg, emit the copy.
    VRBase = MRI->createVirtualRegister(DstRC);
    BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
            VRBase).addReg(SrcReg);
  }

  SDValue Op(Node, ResNo);
  if (IsClone)
    VRBaseMap.erase(Op);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
                                          MachineInstrBuilder &MIB,
                                          const MCInstrDesc &II,
                                          bool IsClone, bool IsCloned,
                                          DenseMap<SDValue, Register> &VRBaseMap) {
  assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
         "IMPLICIT_DEF should have been handled as a special case elsewhere!");

  unsigned NumResults = CountResults(Node);
  // Instructions with vreg variadic defs (and STATEPOINT) define one vreg per
  // node result rather than the fixed def count from the MCInstrDesc.
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
  if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
    NumVRegs = NumResults;
  for (unsigned i = 0; i < NumVRegs; ++i) {
    // If the specific node value is only used by a CopyToReg and the dest reg
    // is a vreg in the same register class, use the CopyToReg'd destination
    // register instead of creating a new vreg.
    Register VRBase;
    const TargetRegisterClass *RC =
        TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
    // Always let the value type influence the used register class. The
    // constraints on the instruction may be too lax to represent the value
    // type correctly. For example, a 64-bit float (X86::FR64) can't live in
    // the 32-bit float super-class (X86::FR32).
    if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
      const TargetRegisterClass *VTRC = TLI->getRegClassFor(
          Node->getSimpleValueType(i),
          (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
      if (RC)
        VTRC = TRI->getCommonSubClass(RC, VTRC);
      if (VTRC)
        RC = VTRC;
    }

    if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
      // Optional def must be a physical register.
      // The physreg comes from the node operand parallel to this def.
      VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
      assert(VRBase.isPhysical());
      MIB.addReg(VRBase, RegState::Define);
    }

    // Otherwise, try to reuse a CopyToReg user's destination vreg, but only
    // when its register class matches exactly.
    if (!VRBase && !IsClone && !IsCloned)
      for (SDNode *User : Node->uses()) {
        if (User->getOpcode() == ISD::CopyToReg &&
            User->getOperand(2).getNode() == Node &&
            User->getOperand(2).getResNo() == i) {
          unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
          if (Register::isVirtualRegister(Reg)) {
            const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
            if (RegRC == RC) {
              VRBase = Reg;
              MIB.addReg(VRBase, RegState::Define);
              break;
            }
          }
        }
      }

    // Create the result registers for this node and add the result regs to
    // the machine instruction.
    if (VRBase == 0) {
      assert(RC && "Isn't a register operand!");
      VRBase = MRI->createVirtualRegister(RC);
      MIB.addReg(VRBase, RegState::Define);
    }

    // If this def corresponds to a result of the SDNode insert the VRBase into
    // the lookup map.
    if (i < NumResults) {
      SDValue Op(Node, i);
      if (IsClone)
        VRBaseMap.erase(Op);
      bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
    }
  }
}

/// getVR - Return the virtual register corresponding to the specified result
/// of the specified node.
Register InstrEmitter::getVR(SDValue Op,
                             DenseMap<SDValue, Register> &VRBaseMap) {
  if (Op.isMachineOpcode() &&
      Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
    // Add an IMPLICIT_DEF instruction before every use.
    // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
    // does not include operand register class info.
    const TargetRegisterClass *RC = TLI->getRegClassFor(
        Op.getSimpleValueType(), Op.getNode()->isDivergent());
    Register VReg = MRI->createVirtualRegister(RC);
    BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
            TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
    return VReg;
  }

  // Anything else must already have been emitted and recorded.
  DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
  assert(I != VRBaseMap.end() && "Node emitted out of order - late");
  return I->second;
}


/// AddRegisterOperand - Add the specified register as an operand to the
/// specified machine instr. Insert register copies if the register is
/// not in the required register class.
void
InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
                                 SDValue Op,
                                 unsigned IIOpNum,
                                 const MCInstrDesc *II,
                                 DenseMap<SDValue, Register> &VRBaseMap,
                                 bool IsDebug, bool IsClone, bool IsCloned) {
  assert(Op.getValueType() != MVT::Other &&
         Op.getValueType() != MVT::Glue &&
         "Chain and glue operands should occur at end of operand list!");
  // Get/emit the operand.
  Register VReg = getVR(Op, VRBaseMap);

  const MCInstrDesc &MCID = MIB->getDesc();
  bool isOptDef = IIOpNum < MCID.getNumOperands() &&
                  MCID.OpInfo[IIOpNum].isOptionalDef();

  // If the instruction requires a register in a different class, create
  // a new virtual register and copy the value into it, but first attempt to
  // shrink VReg's register class within reason. For example, if VReg == GR32
  // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
  if (II) {
    const TargetRegisterClass *OpRC = nullptr;
    if (IIOpNum < II->getNumOperands())
      OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);

    if (OpRC) {
      const TargetRegisterClass *ConstrainedRC
        = MRI->constrainRegClass(VReg, OpRC, MinRCSize);
      if (!ConstrainedRC) {
        // Constraining failed (or would shrink the class below MinRCSize);
        // fall back to an explicit COPY into the required class.
        OpRC = TRI->getAllocatableClass(OpRC);
        assert(OpRC && "Constraints cannot be fulfilled for allocation");
        Register NewVReg = MRI->createVirtualRegister(OpRC);
        BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
                TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
        VReg = NewVReg;
      } else {
        assert(ConstrainedRC->isAllocatable() &&
               "Constraining an allocatable VReg produced an unallocatable class?");
      }
    }
  }

  // If this value has only one use, that use is a kill. This is a
  // conservative approximation. InstrEmitter does trivial coalescing
  // with CopyFromReg nodes, so don't emit kill flags for them.
  // Avoid kill flags on Schedule cloned nodes, since there will be
  // multiple uses.
  // Tied operands are never killed, so we need to check that. And that
  // means we need to determine the index of the operand.
  bool isKill = Op.hasOneUse() &&
                Op.getNode()->getOpcode() != ISD::CopyFromReg &&
                !IsDebug &&
                !(IsClone || IsCloned);
  if (isKill) {
    // Walk back over any trailing implicit register operands to find the
    // index of the operand we are about to append.
    unsigned Idx = MIB->getNumOperands();
    while (Idx > 0 &&
           MIB->getOperand(Idx-1).isReg() &&
           MIB->getOperand(Idx-1).isImplicit())
      --Idx;
    bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
    if (isTied)
      isKill = false;
  }

  MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
             getDebugRegState(IsDebug));
}

/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding.
void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
                              SDValue Op,
                              unsigned IIOpNum,
                              const MCInstrDesc *II,
                              DenseMap<SDValue, Register> &VRBaseMap,
                              bool IsDebug, bool IsClone, bool IsCloned) {
  // Dispatch on the SDNode operand kind; each case appends the corresponding
  // MachineOperand flavor to MIB.
  if (Op.isMachineOpcode()) {
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    MIB.addImm(C->getSExtValue());
  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
    MIB.addFPImm(F->getConstantFPValue());
  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    Register VReg = R->getReg();
    MVT OpVT = Op.getSimpleValueType();
    const TargetRegisterClass *IIRC =
        II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
           : nullptr;
    const TargetRegisterClass *OpRC =
        TLI->isTypeLegal(OpVT)
            ? TLI->getRegClassFor(OpVT,
                                  Op.getNode()->isDivergent() ||
                                      (IIRC && TRI->isDivergentRegClass(IIRC)))
            : nullptr;

    // If the value's preferred class disagrees with what the instruction
    // wants, copy the vreg into the instruction's class.
    if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
      Register NewVReg = MRI->createVirtualRegister(IIRC);
      BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
              TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
      VReg = NewVReg;
    }
    // Turn additional physreg operands into implicit uses on non-variadic
    // instructions. This is used by call and return instructions passing
    // arguments in registers.
    bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
    MIB.addReg(VReg, getImplRegState(Imp));
  } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
    MIB.addRegMask(RM->getRegMask());
  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
    MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
                         TGA->getTargetFlags());
  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
    MIB.addMBB(BBNode->getBasicBlock());
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
    MIB.addFrameIndex(FI->getIndex());
  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
    MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
    int Offset = CP->getOffset();
    Align Alignment = CP->getAlign();

    unsigned Idx;
    MachineConstantPool *MCP = MF->getConstantPool();
    if (CP->isMachineConstantPoolEntry())
      Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
    else
      Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
    MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
    MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
  } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
    MIB.addSym(SymNode->getMCSymbol());
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
    MIB.addBlockAddress(BA->getBlockAddress(),
                        BA->getOffset(),
                        BA->getTargetFlags());
  } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
    MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
  } else {
    assert(Op.getValueType() != MVT::Other &&
           Op.getValueType() != MVT::Glue &&
           "Chain and glue operands should occur at end of operand list!");
    AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                       IsDebug, IsClone, IsCloned);
  }
}

/// ConstrainForSubReg - Make VReg usable with SubIdx sub-register operands:
/// constrain its register class (within MinRCSize) to one that supports
/// SubIdx, or, failing that, emit a COPY into a fresh virtual register of a
/// class that does and return the new register.
Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
                                          MVT VT, bool isDivergent, const DebugLoc &DL) {
  const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
  const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);

  // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
  // within reason.
  if (RC && RC != VRC)
    RC = MRI->constrainRegClass(VReg, RC, MinRCSize);

  // VReg has been adjusted. It can be used with SubIdx operands now.
  if (RC)
    return VReg;

  // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
  // register instead.
  RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
  assert(RC && "No legal register class for VT supports that SubIdx");
  Register NewReg = MRI->createVirtualRegister(RC);
  BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
      .addReg(VReg);
  return NewReg;
}

/// EmitSubregNode - Generate machine code for subreg nodes.
///
void InstrEmitter::EmitSubregNode(SDNode *Node,
                                  DenseMap<SDValue, Register> &VRBaseMap,
                                  bool IsClone, bool IsCloned) {
  Register VRBase;
  unsigned Opc = Node->getMachineOpcode();

  // If the node is only used by a CopyToReg and the dest reg is a vreg, use
  // the CopyToReg'd destination register instead of creating a new vreg.
  for (SDNode *User : Node->uses()) {
    if (User->getOpcode() == ISD::CopyToReg &&
        User->getOperand(2).getNode() == Node) {
      Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
      if (DestReg.isVirtual()) {
        VRBase = DestReg;
        break;
      }
    }
  }

  if (Opc == TargetOpcode::EXTRACT_SUBREG) {
    // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
    // constraints on the %dst register, COPY can target all legal register
    // classes.
    unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    const TargetRegisterClass *TRC =
        TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());

    Register Reg;
    MachineInstr *DefMI;
    RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
    if (R && Register::isPhysicalRegister(R->getReg())) {
      // Physical source registers have no vreg-defining MI to inspect.
      Reg = R->getReg();
      DefMI = nullptr;
    } else {
      Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
      DefMI = MRI->getVRegDef(Reg);
    }

    Register SrcReg, DstReg;
    unsigned DefSubIdx;
    if (DefMI &&
        TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
        SubIdx == DefSubIdx &&
        TRC == MRI->getRegClass(SrcReg)) {
      // Optimize these:
      // r1025 = s/zext r1024, 4
      // r1026 = extract_subreg r1025, 4
      // to a copy
      // r1026 = copy r1024
      VRBase = MRI->createVirtualRegister(TRC);
      BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
              TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
      // SrcReg now has an additional use past any earlier kill.
      MRI->clearKillFlags(SrcReg);
    } else {
      // Reg may not support a SubIdx sub-register, and we may need to
      // constrain its register class or issue a COPY to a compatible register
      // class.
      if (Reg.isVirtual())
        Reg = ConstrainForSubReg(Reg, SubIdx,
                                 Node->getOperand(0).getSimpleValueType(),
                                 Node->isDivergent(), Node->getDebugLoc());
      // Create the destreg if it is missing.
      if (!VRBase)
        VRBase = MRI->createVirtualRegister(TRC);

      // Create the extract_subreg machine instruction.
      MachineInstrBuilder CopyMI =
          BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
                  TII->get(TargetOpcode::COPY), VRBase);
      // For a physical source the sub-register can be resolved immediately;
      // a virtual source keeps the SubIdx on the operand.
      if (Reg.isVirtual())
        CopyMI.addReg(Reg, 0, SubIdx);
      else
        CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
    }
  } else if (Opc == TargetOpcode::INSERT_SUBREG ||
             Opc == TargetOpcode::SUBREG_TO_REG) {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();

    // Figure out the register class to create for the destreg. It should be
    // the largest legal register class supporting SubIdx sub-registers.
    // RegisterCoalescer will constrain it further if it decides to eliminate
    // the INSERT_SUBREG instruction.
    //
    //   %dst = INSERT_SUBREG %src, %sub, SubIdx
    //
    // is lowered by TwoAddressInstructionPass to:
    //
    //   %dst = COPY %src
    //   %dst:SubIdx = COPY %sub
    //
    // There is no constraint on the %src register class.
    //
    const TargetRegisterClass *SRC =
        TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
    SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
    assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");

    // A reused CopyToReg vreg is only acceptable if its class is compatible.
    if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
      VRBase = MRI->createVirtualRegister(SRC);

    // Create the insert_subreg or subreg_to_reg machine instruction.
    MachineInstrBuilder MIB =
        BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);

    // If creating a subreg_to_reg, then the first input operand
    // is an implicit value immediate, otherwise it's a register
    if (Opc == TargetOpcode::SUBREG_TO_REG) {
      const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
      MIB.addImm(SD->getZExtValue());
    } else
      AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
                 IsClone, IsCloned);
    // Add the subregister being inserted
    AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
    MIB.addImm(SubIdx);
    MBB->insert(InsertPos, MIB);
  } else
    llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");

  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
/// COPY_TO_REGCLASS is just a normal copy, except that the destination
/// register is constrained to be in a particular register class.
///
void
InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
                                     DenseMap<SDValue, Register> &VRBaseMap) {
  unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);

  // Create the new VReg in the destination class and emit a copy.
  // Operand 1 carries the destination register-class ID as an immediate.
  unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
  const TargetRegisterClass *DstRC =
      TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
  Register NewVReg = MRI->createVirtualRegister(DstRC);
  BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
          NewVReg).addReg(VReg);

  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
///
void InstrEmitter::EmitRegSequence(SDNode *Node,
                                   DenseMap<SDValue, Register> &VRBaseMap,
                                   bool IsClone, bool IsCloned) {
  // Operand 0 carries the destination register-class ID as an immediate.
  unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
  const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
  Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
  const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
  unsigned NumOps = Node->getNumOperands();
  // If the input pattern has a chain, then the root of the corresponding
  // output pattern will get a chain as well. This can happen to be a
  // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
  if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    --NumOps; // Ignore chain if it exists.

  assert((NumOps & 1) == 1 &&
         "REG_SEQUENCE must have an odd number of operands!");
  // Operands alternate (value, subreg-index) pairs after the class-ID op.
  for (unsigned i = 1; i != NumOps; ++i) {
    SDValue Op = Node->getOperand(i);
    if ((i & 1) == 0) {
      RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
      // Skip physical registers as they don't have a vreg to get and we'll
      // insert copies for them in TwoAddressInstructionPass anyway.
      if (!R || !Register::isPhysicalRegister(R->getReg())) {
        unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
        unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
        const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
        // Widen the result class if a source demands a larger super-class
        // for this sub-register index.
        const TargetRegisterClass *SRC =
            TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
        if (SRC && SRC != RC) {
          MRI->setRegClass(NewVReg, SRC);
          RC = SRC;
        }
      }
    }
    AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
  }

  MBB->insert(InsertPos, MIB);
  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}

/// EmitDbgValue - Generate machine instruction for a dbg_value node.
///
MachineInstr *
InstrEmitter::EmitDbgValue(SDDbgValue *SD,
                           DenseMap<SDValue, Register> &VRBaseMap) {
  MDNode *Var = SD->getVariable();
  MDNode *Expr = SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");

  SD->setIsEmitted();

  if (SD->isInvalidated()) {
    // An invalidated SDNode must generate an undef DBG_VALUE: although the
    // original value is no longer computed, earlier DBG_VALUEs live ranges
    // must not leak into later code.
    auto MIB = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE));
    MIB.addReg(0U);
    MIB.addReg(0U, RegState::Debug);
    MIB.addMetadata(Var);
    MIB.addMetadata(Expr);
    return &*MIB;
  }

  if (SD->getKind() == SDDbgValue::FRAMEIX) {
    // Stack address; this needs to be lowered in target-dependent fashion.
    // EmitTargetCodeForFrameDebugValue is responsible for allocation.
    auto FrameMI = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE))
                       .addFrameIndex(SD->getFrameIx());
    if (SD->isIndirect())
      // Push [fi + 0] onto the DIExpression stack.
      FrameMI.addImm(0);
    else
      // Push fi onto the DIExpression stack.
      FrameMI.addReg(0);
    return FrameMI.addMetadata(Var).addMetadata(Expr);
  }
  // Otherwise, we're going to create an instruction here.
  const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
  MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
  if (SD->getKind() == SDDbgValue::SDNODE) {
    SDNode *Node = SD->getSDNode();
    SDValue Op = SDValue(Node, SD->getResNo());
    // It's possible we replaced this SDNode with other(s) and therefore
    // didn't generate code for it. It's better to catch these cases where
    // they happen and transfer the debug info, but trying to guarantee that
    // in all cases would be very fragile; this is a safeguard for any
    // that were missed.
    DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
    if (I==VRBaseMap.end())
      MIB.addReg(0U); // undef
    else
      AddOperand(MIB, Op, (*MIB).getNumOperands(), &II, VRBaseMap,
                 /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
  } else if (SD->getKind() == SDDbgValue::VREG) {
    // Value already lives in a known virtual register; no lookup needed.
    MIB.addReg(SD->getVReg(), RegState::Debug);
  } else if (SD->getKind() == SDDbgValue::CONST) {
    const Value *V = SD->getConst();
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      // Wide integers don't fit an Imm operand; use a CImm instead.
      if (CI->getBitWidth() > 64)
        MIB.addCImm(CI);
      else
        MIB.addImm(CI->getSExtValue());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      MIB.addFPImm(CF);
    } else if (isa<ConstantPointerNull>(V)) {
      // Note: This assumes that all nullptr constants are zero-valued.
      MIB.addImm(0);
    } else {
      // Could be an Undef. In any case insert an Undef so we can see what we
      // dropped.
      MIB.addReg(0U);
    }
  } else {
    // Insert an Undef so we can see what we dropped.
    MIB.addReg(0U);
  }

  // Indirect addressing is indicated by an Imm as the second parameter.
  if (SD->isIndirect())
    MIB.addImm(0U);
  else
    MIB.addReg(0U, RegState::Debug);

  MIB.addMetadata(Var);
  MIB.addMetadata(Expr);

  return &*MIB;
}

/// EmitDbgLabel - Generate a DBG_LABEL machine instruction for a dbg_label
/// node.
MachineInstr *
InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
  MDNode *Label = SD->getLabel();
  DebugLoc DL = SD->getDebugLoc();
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");

  const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL);
  MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
  MIB.addMetadata(Label);

  return &*MIB;
}

/// EmitMachineNode - Generate machine code for a target-specific node and
/// needed dependencies.
///
/// \p IsClone / \p IsCloned distinguish duplicated nodes so operand kill
/// flags are handled correctly; \p VRBaseMap records the virtual register
/// assigned to each SDValue result.
void InstrEmitter::
EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, Register> &VRBaseMap) {
  unsigned Opc = Node->getMachineOpcode();

  // Handle subreg insert/extract specially
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::INSERT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG) {
    EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  // Handle COPY_TO_REGCLASS specially.
  if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
    EmitCopyToRegClassNode(Node, VRBaseMap);
    return;
  }

  // Handle REG_SEQUENCE specially.
  if (Opc == TargetOpcode::REG_SEQUENCE) {
    EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  if (Opc == TargetOpcode::IMPLICIT_DEF)
    // We want a unique VR for each IMPLICIT_DEF use.
    return;

  const MCInstrDesc &II = TII->get(Opc);
  unsigned NumResults = CountResults(Node);
  unsigned NumDefs = II.getNumDefs();
  const MCPhysReg *ScratchRegs = nullptr;

  // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    // Stackmaps do not have arguments and do not preserve their calling
    // convention. However, to simplify runtime support, they clobber the same
    // scratch registers as AnyRegCC.
    unsigned CC = CallingConv::AnyReg;
    if (Opc == TargetOpcode::PATCHPOINT) {
      CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
      NumDefs = NumResults;
    }
    ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
  } else if (Opc == TargetOpcode::STATEPOINT) {
    // STATEPOINT defs (relocated gc pointers) are not described in the
    // MCInstrDesc; treat every SDNode result as a def.
    NumDefs = NumResults;
  }

  unsigned NumImpUses = 0;
  unsigned NodeOperands =
    countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  // Results beyond the declared defs must be covered by the instruction's
  // implicit physreg defs (unless variadic vreg defs absorb them).
  bool HasPhysRegOuts = NumResults > NumDefs &&
                        II.getImplicitDefs() != nullptr && !HasVRegVariadicDefs;
#ifndef NDEBUG
  unsigned NumMIOperands = NodeOperands + NumResults;
  if (II.isVariadic())
    assert(NumMIOperands >= II.getNumOperands() &&
           "Too few operands for a variadic node!");
  else
    assert(NumMIOperands >= II.getNumOperands() &&
           NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
                                NumImpUses &&
           "#operands for dag node doesn't match .td file!");
#endif

  // Create the new machine instruction.
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);

  // Add result register values for things that are defined by this
  // instruction.
  if (NumResults) {
    CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);

    // Transfer any IR flags from the SDNode to the MachineInstr
    MachineInstr *MI = MIB.getInstr();
    const SDNodeFlags Flags = Node->getFlags();
    if (Flags.hasNoSignedZeros())
      MI->setFlag(MachineInstr::MIFlag::FmNsz);

    if (Flags.hasAllowReciprocal())
      MI->setFlag(MachineInstr::MIFlag::FmArcp);

    if (Flags.hasNoNaNs())
      MI->setFlag(MachineInstr::MIFlag::FmNoNans);

    if (Flags.hasNoInfs())
      MI->setFlag(MachineInstr::MIFlag::FmNoInfs);

    if (Flags.hasAllowContract())
      MI->setFlag(MachineInstr::MIFlag::FmContract);

    if (Flags.hasApproximateFuncs())
      MI->setFlag(MachineInstr::MIFlag::FmAfn);

    if (Flags.hasAllowReassociation())
      MI->setFlag(MachineInstr::MIFlag::FmReassoc);

    if (Flags.hasNoUnsignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoUWrap);

    if (Flags.hasNoSignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoSWrap);

    if (Flags.hasExact())
      MI->setFlag(MachineInstr::MIFlag::IsExact);

    if (Flags.hasNoFPExcept())
      MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
  }

  // Emit all of the actual operands of this instruction, adding them to the
  // instruction as appropriate.
  bool HasOptPRefs = NumDefs > NumResults;
  assert((!HasOptPRefs || !HasPhysRegOuts) &&
         "Unable to cope with optional defs and phys regs defs!");
  // When the MCInstrDesc declares more defs than the node produces results,
  // the leading node operands fill those optional def slots; skip them here.
  unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
  for (unsigned i = NumSkip; i != NodeOperands; ++i)
    AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
               VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);

  // Add scratch registers as implicit def and early clobber
  if (ScratchRegs)
    for (unsigned i = 0; ScratchRegs[i]; ++i)
      MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
                                 RegState::EarlyClobber);

  // Set the memory reference descriptions of this instruction now that it is
  // part of the function.
  MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());

  // Insert the instruction into position in the block. This needs to
  // happen before any custom inserter hook is called so that the
  // hook knows where in the block to insert the replacement code.
  MBB->insert(InsertPos, MIB);

  // The MachineInstr may also define physregs instead of virtregs. These
  // physreg values can reach other instructions in different ways:
  //
  // 1. When there is a use of a Node value beyond the explicitly defined
  //    virtual registers, we emit a CopyFromReg for one of the implicitly
  //    defined physregs. This only happens when HasPhysRegOuts is true.
  //
  // 2. A CopyFromReg reading a physreg may be glued to this instruction.
  //
  // 3. A glued instruction may implicitly use a physreg.
  //
  // 4. A glued instruction may use a RegisterSDNode operand.
  //
  // Collect all the used physreg defs, and make sure that any unused physreg
  // defs are marked as dead.
  SmallVector<Register, 8> UsedRegs;

  // Additional results must be physical register defs.
  if (HasPhysRegOuts) {
    for (unsigned i = NumDefs; i < NumResults; ++i) {
      Register Reg = II.getImplicitDefs()[i - NumDefs];
      if (!Node->hasAnyUseOfValue(i))
        continue;
      // This implicitly defined physreg has a use.
      UsedRegs.push_back(Reg);
      EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
    }
  }

  // Scan the glue chain for any used physregs.
  if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
    for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
      if (F->getOpcode() == ISD::CopyFromReg) {
        UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
        continue;
      } else if (F->getOpcode() == ISD::CopyToReg) {
        // Skip CopyToReg nodes that are internal to the glue chain.
        continue;
      }
      // Collect declared implicit uses.
      const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
      UsedRegs.append(MCID.getImplicitUses(),
                      MCID.getImplicitUses() + MCID.getNumImplicitUses());
      // In addition to declared implicit uses, we must also check for
      // direct RegisterSDNode operands.
      for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
        if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
          Register Reg = R->getReg();
          if (Reg.isPhysical())
            UsedRegs.push_back(Reg);
        }
    }
  }

  // Finally mark unused registers as dead.
  if (!UsedRegs.empty() || II.getImplicitDefs() || II.hasOptionalDef())
    MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);

  // STATEPOINT is too 'dynamic' to have meaningful machine description.
  // We have to manually tie operands.
  if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
    assert(!HasPhysRegOuts && "STATEPOINT mishandled");
    MachineInstr *MI = MIB;
    unsigned Def = 0;
    // GC arguments come in (base, derived) pairs; each def is tied to the
    // next derived-pointer register operand, walking the meta-args with
    // StackMaps::getNextMetaArgIdx.
    unsigned Use = getStatepointGCArgStartIdx(MI);
    Use = StackMaps::getNextMetaArgIdx(MI, Use); // first derived
    assert(Use < MI->getNumOperands());
    while (Def < NumDefs) {
      // Only register operands can be tied; non-register derived pointers
      // (e.g. spilled or constant) are skipped without consuming a def.
      if (MI->getOperand(Use).isReg())
        MI->tieOperands(Def++, Use);
      Use = StackMaps::getNextMetaArgIdx(MI, Use); // next base
      Use = StackMaps::getNextMetaArgIdx(MI, Use); // next derived
    }
  }

  // Run post-isel target hook to adjust this instruction if needed.
  if (II.hasPostISelHook())
    TLI->AdjustInstrPostInstrSelection(*MIB, Node);
}

/// EmitSpecialNode - Generate machine code for a target-independent node and
/// needed dependencies.
void InstrEmitter::
EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, Register> &VRBaseMap) {
  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    Node->dump();
#endif
    llvm_unreachable("This target-independent node should have been selected!");
  case ISD::EntryToken:
    llvm_unreachable("EntryToken should have been excluded from the schedule!");
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor: // fall thru
    break;
  case ISD::CopyToReg: {
    Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
    SDValue SrcVal = Node->getOperand(2);
    if (Register::isVirtualRegister(DestReg) && SrcVal.isMachineOpcode() &&
        SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
      // Instead building a COPY to that vreg destination, build an
      // IMPLICIT_DEF instruction instead.
1035 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), 1036 TII->get(TargetOpcode::IMPLICIT_DEF), DestReg); 1037 break; 1038 } 1039 Register SrcReg; 1040 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal)) 1041 SrcReg = R->getReg(); 1042 else 1043 SrcReg = getVR(SrcVal, VRBaseMap); 1044 1045 if (SrcReg == DestReg) // Coalesced away the copy? Ignore. 1046 break; 1047 1048 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY), 1049 DestReg).addReg(SrcReg); 1050 break; 1051 } 1052 case ISD::CopyFromReg: { 1053 unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg(); 1054 EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap); 1055 break; 1056 } 1057 case ISD::EH_LABEL: 1058 case ISD::ANNOTATION_LABEL: { 1059 unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL) 1060 ? TargetOpcode::EH_LABEL 1061 : TargetOpcode::ANNOTATION_LABEL; 1062 MCSymbol *S = cast<LabelSDNode>(Node)->getLabel(); 1063 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), 1064 TII->get(Opc)).addSym(S); 1065 break; 1066 } 1067 1068 case ISD::LIFETIME_START: 1069 case ISD::LIFETIME_END: { 1070 unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) ? 1071 TargetOpcode::LIFETIME_START : TargetOpcode::LIFETIME_END; 1072 1073 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Node->getOperand(1)); 1074 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp)) 1075 .addFrameIndex(FI->getIndex()); 1076 break; 1077 } 1078 1079 case ISD::INLINEASM: 1080 case ISD::INLINEASM_BR: { 1081 unsigned NumOps = Node->getNumOperands(); 1082 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue) 1083 --NumOps; // Ignore the glue operand. 1084 1085 // Create the inline asm machine instruction. 1086 unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR 1087 ? TargetOpcode::INLINEASM_BR 1088 : TargetOpcode::INLINEASM; 1089 MachineInstrBuilder MIB = 1090 BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc)); 1091 1092 // Add the asm string as an external symbol operand. 
    SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
    const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
    MIB.addExternalSymbol(AsmStr);

    // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
    // bits.
    int64_t ExtraInfo =
      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
                          getZExtValue();
    MIB.addImm(ExtraInfo);

    // Remember the operand index of each operand group's flags word, so tied
    // uses below can locate their def group by index.
    SmallVector<unsigned, 8> GroupIdx;

    // Remember registers that are part of early-clobber defs.
    SmallVector<unsigned, 8> ECRegs;

    // Add all of the operand registers to the instruction.
    for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
      unsigned Flags =
        cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
      const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

      GroupIdx.push_back(MIB->getNumOperands());
      MIB.addImm(Flags);
      ++i;  // Skip the ID value.

      switch (InlineAsm::getKind(Flags)) {
      default: llvm_unreachable("Bad flags!");
      case InlineAsm::Kind_RegDef:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          // FIXME: Add dead flags for physical and virtual registers defined.
          // For now, mark physical register defs as implicit to help fast
          // regalloc. This makes inline asm look a lot like calls.
          MIB.addReg(Reg,
                     RegState::Define |
                         getImplRegState(Register::isPhysicalRegister(Reg)));
        }
        break;
      case InlineAsm::Kind_RegDefEarlyClobber:
      case InlineAsm::Kind_Clobber:
        for (unsigned j = 0; j != NumVals; ++j, ++i) {
          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
          MIB.addReg(Reg,
                     RegState::Define | RegState::EarlyClobber |
                         getImplRegState(Register::isPhysicalRegister(Reg)));
          // Track early-clobber defs; the flag may need to be dropped later
          // if the same register is also read by this asm.
          ECRegs.push_back(Reg);
        }
        break;
      case InlineAsm::Kind_RegUse:  // Use of register.
      case InlineAsm::Kind_Imm:     // Immediate.
      case InlineAsm::Kind_Mem:     // Addressing mode.
        // The addressing mode has been selected, just add all of the
        // operands to the machine instruction.
        for (unsigned j = 0; j != NumVals; ++j, ++i)
          AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
                     /*IsDebug=*/false, IsClone, IsCloned);

        // Manually set isTied bits.
        if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
          unsigned DefGroup = 0;
          if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
            // +1 skips the flags-word operand at the start of each group.
            unsigned DefIdx = GroupIdx[DefGroup] + 1;
            unsigned UseIdx = GroupIdx.back() + 1;
            for (unsigned j = 0; j != NumVals; ++j)
              MIB->tieOperands(DefIdx + j, UseIdx + j);
          }
        }
        break;
      }
    }

    // GCC inline assembly allows input operands to also be early-clobber
    // output operands (so long as the operand is written only after it's
    // used), but this does not match the semantics of our early-clobber flag.
    // If an early-clobber operand register is also an input operand register,
    // then remove the early-clobber flag.
    for (unsigned Reg : ECRegs) {
      if (MIB->readsRegister(Reg, TRI)) {
        MachineOperand *MO =
            MIB->findRegisterDefOperand(Reg, /*isDead=*/false,
                                        /*Overlap=*/false, TRI);
        assert(MO && "No def operand for clobbered register?");
        MO->setIsEarlyClobber(false);
      }
    }

    // Get the mdnode from the asm if it exists and add it to the instruction.
    SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
    const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
    if (MD)
      MIB.addMetadata(MD);

    MBB->insert(InsertPos, MIB);
    break;
  }
  } // end switch
}

/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
/// at the given position in the given block.
InstrEmitter::InstrEmitter(MachineBasicBlock *mbb,
                           MachineBasicBlock::iterator insertpos)
    : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
      TII(MF->getSubtarget().getInstrInfo()),
      TRI(MF->getSubtarget().getRegisterInfo()),
      TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
      InsertPos(insertpos) {}