//===- AMDGPUMCInstLower.cpp - Lower AMDGPU MachineInstr to an MCInst -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Code to lower AMDGPU MachineInstrs to their corresponding MCInst.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "R600AsmPrinter.h"
#include "R600Subtarget.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include <algorithm>

using namespace llvm;

namespace {

/// Helper that translates MachineOperands/MachineInstrs into their MC
/// equivalents for the GCN/SI part of the AMDGPU backend.  It holds non-owning
/// references to the MC context, the subtarget (needed for register and pseudo
/// opcode translation), and the AsmPrinter (needed for symbol-name mangling).
class AMDGPUMCInstLower {
  MCContext &Ctx;
  const TargetSubtargetInfo &ST;
  const AsmPrinter &AP;

public:
  AMDGPUMCInstLower(MCContext &ctx, const TargetSubtargetInfo &ST,
                    const AsmPrinter &AP);

  /// Lower a single MachineOperand into \p MCOp.  Returns true when \p MCOp
  /// was produced; returns false for operands with no MC representation
  /// (register masks).
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const;

  /// Lower a MachineInstr to an MCInst
  void lower(const MachineInstr *MI, MCInst &OutMI) const;

};

/// R600 variant: shares operand lowering with the base class, but lowers
/// instructions without any pseudo-to-MC opcode translation (see the
/// definition of R600MCInstLower::lower below).
class R600MCInstLower : public AMDGPUMCInstLower {
public:
  R600MCInstLower(MCContext &ctx, const R600Subtarget &ST,
                  const AsmPrinter &AP);

  /// Lower a MachineInstr to an MCInst
  void lower(const MachineInstr *MI, MCInst &OutMI) const;
};


} // End anonymous namespace

// TableGen'd implementation of emitPseudoExpansionLowering used by
// AMDGPUAsmPrinter::emitInstruction below.
#include "AMDGPUGenMCPseudoLowering.inc"

AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx,
                                     const TargetSubtargetInfo &st,
                                     const AsmPrinter &ap):
  Ctx(ctx), ST(st), AP(ap) { }

/// Map a machine-operand target flag (SIInstrInfo::MO_*) onto the
/// MCSymbolRefExpr variant kind that selects the matching relocation
/// (e.g. @gotpcrel32@lo, @rel32@hi) when the symbol reference is emitted.
/// Unknown flags map to VK_None (a plain symbol reference).
static MCSymbolRefExpr::VariantKind getVariantKind(unsigned MOFlags) {
  switch (MOFlags) {
  default:
    return MCSymbolRefExpr::VK_None;
  case SIInstrInfo::MO_GOTPCREL:
    return MCSymbolRefExpr::VK_GOTPCREL;
  case SIInstrInfo::MO_GOTPCREL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_LO;
  case SIInstrInfo::MO_GOTPCREL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_HI;
  case SIInstrInfo::MO_REL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_LO;
  case SIInstrInfo::MO_REL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_HI;
  case SIInstrInfo::MO_ABS32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_ABS32_LO;
  case SIInstrInfo::MO_ABS32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
}

bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    return true;
  case MachineOperand::MO_Register:
    // Translate the virtual/target register to its MC register number for
    // this subtarget.
    MCOp = MCOperand::createReg(AMDGPU::getMCReg(MO.getReg(), ST));
    return true;
  case MachineOperand::MO_MachineBasicBlock:
    // Basic-block operands become references to the block's label symbol.
    MCOp = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    return true;
  case MachineOperand::MO_GlobalAddress: {
    const GlobalValue *GV = MO.getGlobal();
    SmallString<128> SymbolName;
    // Use the AsmPrinter so the symbol gets the proper mangled name/prefix.
    AP.getNameWithPrefix(SymbolName, GV);
    MCSymbol *Sym = Ctx.getOrCreateSymbol(SymbolName);
    // The operand's target flags pick the relocation variant (see
    // getVariantKind above).
    const MCExpr *Expr =
        MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()),Ctx);
    int64_t Offset = MO.getOffset();
    if (Offset != 0) {
      // Fold a non-zero byte offset into the expression: Sym + Offset.
      Expr = MCBinaryExpr::createAdd(Expr,
                                     MCConstantExpr::create(Offset, Ctx), Ctx);
    }
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_ExternalSymbol: {
    MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
    Sym->setExternal(true);
    const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  case MachineOperand::MO_MCSymbol:
    if (MO.getTargetFlags() == SIInstrInfo::MO_FAR_BRANCH_OFFSET) {
      // Far-branch offsets are carried as MC variable symbols; emit the
      // symbol's defining expression rather than a reference to it.
      MCSymbol *Sym = MO.getMCSymbol();
      MCOp = MCOperand::createExpr(Sym->getVariableValue());
      return true;
    }
    break;
  }
  // Any operand type not handled above (or an MO_MCSymbol without the
  // far-branch flag) is a lowering bug.
  llvm_unreachable("unknown operand type");
}

void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
  unsigned Opcode = MI->getOpcode();
  const auto *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());

  // FIXME: Should be able to handle this with emitPseudoExpansionLowering. We
  // need to select it to the subtarget specific version, and there's no way to
  // do that with a single pseudo source operation.
  if (Opcode == AMDGPU::S_SETPC_B64_return)
    Opcode = AMDGPU::S_SETPC_B64;
  else if (Opcode == AMDGPU::SI_CALL) {
    // SI_CALL is just S_SWAPPC_B64 with an additional operand to track the
    // called function (which we need to remove here).
    OutMI.setOpcode(TII->pseudoToMCOpcode(AMDGPU::S_SWAPPC_B64));
    MCOperand Dest, Src;
    lowerOperand(MI->getOperand(0), Dest);
    lowerOperand(MI->getOperand(1), Src);
    OutMI.addOperand(Dest);
    OutMI.addOperand(Src);
    return;
  } else if (Opcode == AMDGPU::SI_TCRETURN) {
    // TODO: How to use branch immediate and avoid register+add?
    Opcode = AMDGPU::S_SETPC_B64;
  }

  // Map the (possibly remapped) pseudo opcode onto the subtarget's real MC
  // opcode.
  int MCOpcode = TII->pseudoToMCOpcode(Opcode);
  if (MCOpcode == -1) {
    // Report through the LLVMContext; note we still fall through and set the
    // (invalid) opcode below, relying on emitError's diagnostics handling.
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
                "a target-specific version: " + Twine(MI->getOpcode()));
  }

  OutMI.setOpcode(MCOpcode);

  // Implicit operands (e.g. regmasks) are dropped; only explicit operands are
  // lowered into the MCInst.
  for (const MachineOperand &MO : MI->explicit_operands()) {
    MCOperand MCOp;
    lowerOperand(MO, MCOp);
    OutMI.addOperand(MCOp);
  }

  // If the MC opcode has a trailing 'fi' named operand that the MI did not
  // supply, pad it with an immediate 0 so the operand count matches.
  // NOTE(review): presumably this is the DPP 'fi' (fetch-invalidate) operand;
  // confirm against the instruction definitions.
  int FIIdx = AMDGPU::getNamedOperandIdx(MCOpcode, AMDGPU::OpName::fi);
  if (FIIdx >= (int)OutMI.getNumOperands())
    OutMI.addOperand(MCOperand::createImm(0));
}

bool AMDGPUAsmPrinter::lowerOperand(const MachineOperand &MO,
                                    MCOperand &MCOp) const {
  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);
  return MCInstLowering.lowerOperand(MO, MCOp);
}

/// If \p CV is an addrspacecast of a null pointer whose source address space
/// uses 0 as its null value, return an MCConstantExpr holding the destination
/// address space's null value; otherwise return nullptr so the caller falls
/// back to the generic constant lowering.
static const MCExpr *lowerAddrSpaceCast(const TargetMachine &TM,
                                        const Constant *CV,
                                        MCContext &OutContext) {
  // TargetMachine does not support llvm-style cast. Use C++-style cast.
  // This is safe since TM is always of type AMDGPUTargetMachine or its
  // derived class.
  auto &AT = static_cast<const AMDGPUTargetMachine&>(TM);
  auto *CE = dyn_cast<ConstantExpr>(CV);

  // Lower null pointers in private and local address space.
  // Clang generates addrspacecast for null pointers in private and local
  // address space, which needs to be lowered.
  if (CE && CE->getOpcode() == Instruction::AddrSpaceCast) {
    auto Op = CE->getOperand(0);
    auto SrcAddr = Op->getType()->getPointerAddressSpace();
    if (Op->isNullValue() && AT.getNullPointerValue(SrcAddr) == 0) {
      auto DstAddr = CE->getType()->getPointerAddressSpace();
      return MCConstantExpr::create(AT.getNullPointerValue(DstAddr),
                                    OutContext);
    }
  }
  return nullptr;
}

const MCExpr *AMDGPUAsmPrinter::lowerConstant(const Constant *CV) {
  // Handle addrspacecast-of-null specially; defer everything else to the
  // generic AsmPrinter lowering.
  if (const MCExpr *E = lowerAddrSpaceCast(TM, CV, OutContext))
    return E;
  return AsmPrinter::lowerConstant(CV);
}

void AMDGPUAsmPrinter::emitInstruction(const MachineInstr *MI) {
  // TableGen'd pseudo expansion (AMDGPUGenMCPseudoLowering.inc) handles some
  // pseudos entirely; nothing more to do for those.
  if (emitPseudoExpansionLowering(*OutStreamer, MI)) 
    return;

  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);

  StringRef Err;
  if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
    // Diagnose but keep going: print the offending MI for debugging.
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("Illegal instruction detected: " + Err);
    MI->print(errs());
  }

  if (MI->isBundle()) {
    // Emit every instruction inside the bundle by recursing on each one.
    const MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
    while (I != MBB->instr_end() && I->isInsideBundle()) {
      emitInstruction(&*I);
      ++I;
    }
  } else {
    // We don't want these pseudo instructions encoded. They are
    // placeholder terminator instructions and should only be printed as
    // comments.
    if (MI->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) {
      if (isVerbose())
        OutStreamer->emitRawComment(" return to shader part epilog");
      return;
    }

    if (MI->getOpcode() == AMDGPU::WAVE_BARRIER) {
      if (isVerbose())
        OutStreamer->emitRawComment(" wave barrier");
      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_MASKED_UNREACHABLE) {
      if (isVerbose())
        OutStreamer->emitRawComment(" divergent unreachable");
      return;
    }

    MCInst TmpInst;
    MCInstLowering.lower(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);

#ifdef EXPENSIVE_CHECKS
    // Sanity-check getInstSizeInBytes on explicitly specified CPUs (it cannot
    // work correctly for the generic CPU).
    //
    // The isPseudo check really shouldn't be here, but unfortunately there are
    // some negative lit tests that depend on being able to continue through
    // here even when pseudo instructions haven't been lowered.
    //
    // We also overestimate branch sizes with the offset bug.
    if (!MI->isPseudo() && STI.isCPUStringValid(STI.getCPU()) &&
        (!STI.hasOffset3fBug() || !MI->isBranch())) {
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      // Encode the lowered MCInst and compare the byte count against the
      // size reported by SIInstrInfo::getInstSizeInBytes.
      std::unique_ptr<MCCodeEmitter> InstEmitter(createSIMCCodeEmitter(
          *STI.getInstrInfo(), *OutContext.getRegisterInfo(), OutContext));
      InstEmitter->encodeInstruction(TmpInst, CodeStream, Fixups, STI);

      assert(CodeBytes.size() == STI.getInstrInfo()->getInstSizeInBytes(*MI));
    }
#endif

    if (DumpCodeInstEmitter) {
      // Disassemble instruction/operands to text
      DisasmLines.resize(DisasmLines.size() + 1);
      std::string &DisasmLine = DisasmLines.back();
      raw_string_ostream DisasmStream(DisasmLine);

      AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(), *STI.getInstrInfo(),
                                    *STI.getRegisterInfo());
      InstPrinter.printInst(&TmpInst, 0, StringRef(), STI, DisasmStream);

      // Disassemble instruction/operands to hex representation.
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      DumpCodeInstEmitter->encodeInstruction(
          TmpInst, CodeStream, Fixups, MF->getSubtarget<MCSubtargetInfo>());
      HexLines.resize(HexLines.size() + 1);
      std::string &HexLine = HexLines.back();
      raw_string_ostream HexStream(HexLine);

      // Print the encoding as space-separated dwords.
      // NOTE(review): the cast-based dword read assumes CodeBytes is
      // 4-byte-aligned and a multiple of 4 bytes, and technically violates
      // strict aliasing — looks like it works in practice for encoded
      // instructions; confirm before reusing this pattern.
      for (size_t i = 0; i < CodeBytes.size(); i += 4) {
        unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
        HexStream << format("%s%08X", (i > 0 ? " " : ""), CodeDWord);
      }

      DisasmStream.flush();
      DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size());
    }
  }
}

R600MCInstLower::R600MCInstLower(MCContext &Ctx, const R600Subtarget &ST,
                                 const AsmPrinter &AP) :
  AMDGPUMCInstLower(Ctx, ST, AP) { }

// R600 lowering: opcodes map 1:1 (no pseudoToMCOpcode translation); only the
// explicit operands are lowered.
void R600MCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());
  for (const MachineOperand &MO : MI->explicit_operands()) {
    MCOperand MCOp;
    lowerOperand(MO, MCOp);
    OutMI.addOperand(MCOp);
  }
}

void R600AsmPrinter::emitInstruction(const MachineInstr *MI) {
  const R600Subtarget &STI = MF->getSubtarget<R600Subtarget>();
  R600MCInstLower MCInstLowering(OutContext, STI, *this);

  StringRef Err;
  if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
    // Diagnose but keep going, mirroring AMDGPUAsmPrinter::emitInstruction.
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("Illegal instruction detected: " + Err);
    MI->print(errs());
  }

  if (MI->isBundle()) {
    // Emit every instruction inside the bundle by recursing on each one.
    const MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
    while (I != MBB->instr_end() && I->isInsideBundle()) {
      emitInstruction(&*I);
      ++I;
    }
  } else {
    MCInst TmpInst;
    MCInstLowering.lower(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);
  }
}

const MCExpr *R600AsmPrinter::lowerConstant(const Constant *CV) {
  // Same addrspacecast-of-null special case as the GCN printer.
  if (const MCExpr *E = lowerAddrSpaceCast(TM, CV, OutContext))
    return E;
  return AsmPrinter::lowerConstant(CV);
}