//===- AMDGPUMCInstLower.cpp - Lower AMDGPU MachineInstr to an MCInst -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Code to lower AMDGPU MachineInstrs to their corresponding MCInst.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "R600AsmPrinter.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include <algorithm>

using namespace llvm;

namespace {

/// Translates MachineOperands/MachineInstrs into their MC counterparts for
/// the AMDGPU targets (the R600 subclass below reuses the operand lowering).
class AMDGPUMCInstLower {
  MCContext &Ctx;                // Context used to create symbols and exprs.
  const TargetSubtargetInfo &ST; // Subtarget, for MC register/opcode mapping.
  const AsmPrinter &AP;          // Printer, for mangled global symbol names.

  /// Build the offset expression for a basic-block operand of a long-branch
  /// expansion (distance between the source and destination blocks).
  const MCExpr *getLongBranchBlockExpr(const MachineBasicBlock &SrcBB,
                                       const MachineOperand &MO) const;

public:
  AMDGPUMCInstLower(MCContext &ctx, const TargetSubtargetInfo &ST,
                    const AsmPrinter &AP);

  /// Lower a single MachineOperand into \p MCOp. Returns false for operands
  /// with no MC equivalent (e.g. register masks).
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const;

  /// Lower a MachineInstr to an MCInst
  void lower(const MachineInstr *MI, MCInst &OutMI) const;

};

/// R600 variant: copies the opcode through 1:1 instead of mapping pseudos.
class R600MCInstLower : public AMDGPUMCInstLower {
public:
  R600MCInstLower(MCContext &ctx,
                  const R600Subtarget &ST,
                  const AsmPrinter &AP);

  /// Lower a MachineInstr to an MCInst
  void lower(const MachineInstr *MI, MCInst &OutMI) const;
};

} // End anonymous namespace

#include "AMDGPUGenMCPseudoLowering.inc"

AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx,
                                     const TargetSubtargetInfo &st,
                                     const AsmPrinter &ap):
  Ctx(ctx), ST(st), AP(ap) { }

/// Map a machine-operand target flag (SIInstrInfo::MO_*) to the relocation
/// variant kind placed on the corresponding MCSymbolRefExpr. Flags with no
/// relocation mapping fall back to VK_None.
static MCSymbolRefExpr::VariantKind getVariantKind(unsigned MOFlags) {
  switch (MOFlags) {
  default:
    return MCSymbolRefExpr::VK_None;
  case SIInstrInfo::MO_GOTPCREL:
    return MCSymbolRefExpr::VK_GOTPCREL;
  case SIInstrInfo::MO_GOTPCREL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_LO;
  case SIInstrInfo::MO_GOTPCREL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_HI;
  case SIInstrInfo::MO_REL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_LO;
  case SIInstrInfo::MO_REL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_HI;
  }
}

/// Compute the branch-offset expression for a long-branch destination block:
/// the byte distance between \p SrcBB (which must begin with s_getpc_b64,
/// asserted below) and the destination block referenced by \p MO.
const MCExpr *AMDGPUMCInstLower::getLongBranchBlockExpr(
  const MachineBasicBlock &SrcBB,
  const MachineOperand &MO) const {
  const MCExpr *DestBBSym
    = MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx);
  const MCExpr *SrcBBSym = MCSymbolRefExpr::create(SrcBB.getSymbol(), Ctx);

  // FIXME: The first half of this assert should be removed. This should
  // probably be PC relative instead of using the source block symbol, and
  // therefore the indirect branch expansion should use a bundle.
  assert(
      skipDebugInstructionsForward(SrcBB.begin(), SrcBB.end())->getOpcode() ==
          AMDGPU::S_GETPC_B64 &&
      ST.getInstrInfo()->get(AMDGPU::S_GETPC_B64).Size == 4);

  // s_getpc_b64 returns the address of next instruction.
112 const MCConstantExpr *One = MCConstantExpr::create(4, Ctx); 113 SrcBBSym = MCBinaryExpr::createAdd(SrcBBSym, One, Ctx); 114 115 if (MO.getTargetFlags() == AMDGPU::TF_LONG_BRANCH_FORWARD) 116 return MCBinaryExpr::createSub(DestBBSym, SrcBBSym, Ctx); 117 118 assert(MO.getTargetFlags() == AMDGPU::TF_LONG_BRANCH_BACKWARD); 119 return MCBinaryExpr::createSub(SrcBBSym, DestBBSym, Ctx); 120 } 121 122 bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO, 123 MCOperand &MCOp) const { 124 switch (MO.getType()) { 125 default: 126 llvm_unreachable("unknown operand type"); 127 case MachineOperand::MO_Immediate: 128 MCOp = MCOperand::createImm(MO.getImm()); 129 return true; 130 case MachineOperand::MO_Register: 131 MCOp = MCOperand::createReg(AMDGPU::getMCReg(MO.getReg(), ST)); 132 return true; 133 case MachineOperand::MO_MachineBasicBlock: { 134 if (MO.getTargetFlags() != 0) { 135 MCOp = MCOperand::createExpr( 136 getLongBranchBlockExpr(*MO.getParent()->getParent(), MO)); 137 } else { 138 MCOp = MCOperand::createExpr( 139 MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx)); 140 } 141 142 return true; 143 } 144 case MachineOperand::MO_GlobalAddress: { 145 const GlobalValue *GV = MO.getGlobal(); 146 SmallString<128> SymbolName; 147 AP.getNameWithPrefix(SymbolName, GV); 148 MCSymbol *Sym = Ctx.getOrCreateSymbol(SymbolName); 149 const MCExpr *SymExpr = 150 MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()),Ctx); 151 const MCExpr *Expr = MCBinaryExpr::createAdd(SymExpr, 152 MCConstantExpr::create(MO.getOffset(), Ctx), Ctx); 153 MCOp = MCOperand::createExpr(Expr); 154 return true; 155 } 156 case MachineOperand::MO_ExternalSymbol: { 157 MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName())); 158 Sym->setExternal(true); 159 const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx); 160 MCOp = MCOperand::createExpr(Expr); 161 return true; 162 } 163 case MachineOperand::MO_RegisterMask: 164 // Regmasks are like implicit defs. 
    return false;
  }
}

void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
  unsigned Opcode = MI->getOpcode();
  const auto *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());

  // FIXME: Should be able to handle this with emitPseudoExpansionLowering. We
  // need to select it to the subtarget specific version, and there's no way to
  // do that with a single pseudo source operation.
  if (Opcode == AMDGPU::S_SETPC_B64_return)
    Opcode = AMDGPU::S_SETPC_B64;
  else if (Opcode == AMDGPU::SI_CALL) {
    // SI_CALL is just S_SWAPPC_B64 with an additional operand to track the
    // called function (which we need to remove here).
    OutMI.setOpcode(TII->pseudoToMCOpcode(AMDGPU::S_SWAPPC_B64));
    // Only the first two explicit operands are lowered; the extra
    // callee-tracking operand is deliberately dropped.
    MCOperand Dest, Src;
    lowerOperand(MI->getOperand(0), Dest);
    lowerOperand(MI->getOperand(1), Src);
    OutMI.addOperand(Dest);
    OutMI.addOperand(Src);
    return;
  } else if (Opcode == AMDGPU::SI_TCRETURN) {
    // TODO: How to use branch immediate and avoid register+add?
    Opcode = AMDGPU::S_SETPC_B64;
  }

  // Map the (possibly rewritten) pseudo opcode to the subtarget's real MC
  // opcode.
  int MCOpcode = TII->pseudoToMCOpcode(Opcode);
  if (MCOpcode == -1) {
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
                "a target-specific version: " + Twine(MI->getOpcode()));
    // NOTE(review): if a diagnostic handler makes emitError non-fatal,
    // execution continues and -1 flows into setOpcode below — confirm this
    // best-effort fallthrough is intended.
  }

  OutMI.setOpcode(MCOpcode);

  for (const MachineOperand &MO : MI->explicit_operands()) {
    MCOperand MCOp;
    lowerOperand(MO, MCOp);
    OutMI.addOperand(MCOp);
  }
}

/// AsmPrinter hook: lower one operand via a temporary AMDGPUMCInstLower
/// bound to the current function's GCN subtarget.
bool AMDGPUAsmPrinter::lowerOperand(const MachineOperand &MO,
                                    MCOperand &MCOp) const {
  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);
  return MCInstLowering.lowerOperand(MO, MCOp);
}

/// If \p CV is an addrspacecast of a null pointer (from an address space
/// whose null value is 0), return the destination address space's
/// target-defined null value as a constant; otherwise return nullptr so the
/// generic constant lowering runs.
static const MCExpr *lowerAddrSpaceCast(const TargetMachine &TM,
                                        const Constant *CV,
                                        MCContext &OutContext) {
  // TargetMachine does not support llvm-style cast. Use C++-style cast.
  // This is safe since TM is always of type AMDGPUTargetMachine or its
  // derived class.
  auto &AT = static_cast<const AMDGPUTargetMachine&>(TM);
  auto *CE = dyn_cast<ConstantExpr>(CV);

  // Lower null pointers in private and local address space.
  // Clang generates addrspacecast for null pointers in private and local
  // address space, which needs to be lowered.
  if (CE && CE->getOpcode() == Instruction::AddrSpaceCast) {
    auto Op = CE->getOperand(0);
    auto SrcAddr = Op->getType()->getPointerAddressSpace();
    // Only rewrite when the source null pointer is the all-zero value; the
    // result is the destination space's target-defined null value.
    if (Op->isNullValue() && AT.getNullPointerValue(SrcAddr) == 0) {
      auto DstAddr = CE->getType()->getPointerAddressSpace();
      return MCConstantExpr::create(AT.getNullPointerValue(DstAddr),
                                    OutContext);
    }
  }
  return nullptr;
}

/// Give AMDGPU's addrspacecast-of-null handling first crack at a constant
/// before deferring to the generic AsmPrinter implementation.
const MCExpr *AMDGPUAsmPrinter::lowerConstant(const Constant *CV) {
  if (const MCExpr *E = lowerAddrSpaceCast(TM, CV, OutContext))
    return E;
  return AsmPrinter::lowerConstant(CV);
}

/// Emit one MachineInstr to the streamer: tries the TableGen'erated pseudo
/// expansion first, verifies the instruction, recurses into bundles, and
/// special-cases the comment-only placeholder opcodes handled below.
void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
  if (emitPseudoExpansionLowering(*OutStreamer, MI))
    return;

  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);

  StringRef Err;
  if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
    // Report the broken instruction and dump it for diagnosis.
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("Illegal instruction detected: " + Err);
    MI->print(errs());
  }

  if (MI->isBundle()) {
    // Emit each instruction inside the bundle individually.
    const MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
    while (I != MBB->instr_end() && I->isInsideBundle()) {
      EmitInstruction(&*I);
      ++I;
    }
  } else {
    // We don't want SI_MASK_BRANCH/SI_RETURN_TO_EPILOG encoded. They are
    // placeholder terminator instructions and should only be printed as
    // comments.
    if (MI->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      // Emit only a "; mask branch <bb>" comment naming the target block.
      if (isVerbose()) {
        SmallVector<char, 16> BBStr;
        raw_svector_ostream Str(BBStr);

        const MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
        const MCSymbolRefExpr *Expr
          = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext);
        Expr->print(Str, MAI);
        OutStreamer->emitRawComment(Twine(" mask branch ") + BBStr);
      }

      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) {
      if (isVerbose())
        OutStreamer->emitRawComment(" return to shader part epilog");
      return;
    }

    if (MI->getOpcode() == AMDGPU::WAVE_BARRIER) {
      if (isVerbose())
        OutStreamer->emitRawComment(" wave barrier");
      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_MASKED_UNREACHABLE) {
      if (isVerbose())
        OutStreamer->emitRawComment(" divergent unreachable");
      return;
    }

    // A real instruction: lower it and emit to the streamer.
    MCInst TmpInst;
    MCInstLowering.lower(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);

#ifdef EXPENSIVE_CHECKS
    // Sanity-check getInstSizeInBytes on explicitly specified CPUs (it cannot
    // work correctly for the generic CPU).
    //
    // The isPseudo check really shouldn't be here, but unfortunately there are
    // some negative lit tests that depend on being able to continue through
    // here even when pseudo instructions haven't been lowered.
    if (!MI->isPseudo() && STI.isCPUStringValid(STI.getCPU())) {
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      std::unique_ptr<MCCodeEmitter> InstEmitter(createSIMCCodeEmitter(
          *STI.getInstrInfo(), *OutContext.getRegisterInfo(), OutContext));
      InstEmitter->encodeInstruction(TmpInst, CodeStream, Fixups, STI);

      // The actual encoded size must agree with getInstSizeInBytes.
      assert(CodeBytes.size() == STI.getInstrInfo()->getInstSizeInBytes(*MI));
    }
#endif

    if (DumpCodeInstEmitter) {
      // Disassemble instruction/operands to text
      DisasmLines.resize(DisasmLines.size() + 1);
      std::string &DisasmLine = DisasmLines.back();
      raw_string_ostream DisasmStream(DisasmLine);

      AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(), *STI.getInstrInfo(),
                                    *STI.getRegisterInfo());
      InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(), STI);

      // Disassemble instruction/operands to hex representation.
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      DumpCodeInstEmitter->encodeInstruction(
          TmpInst, CodeStream, Fixups, MF->getSubtarget<MCSubtargetInfo>());
      HexLines.resize(HexLines.size() + 1);
      std::string &HexLine = HexLines.back();
      raw_string_ostream HexStream(HexLine);

      // Print the encoding one dword at a time.
      // NOTE(review): the cast type-puns the byte buffer to unsigned int and
      // assumes the encoding is a multiple of 4 bytes — consider memcpy or
      // an endian-aware read helper; confirm alignment/endianness here.
      for (size_t i = 0; i < CodeBytes.size(); i += 4) {
        unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
        HexStream << format("%s%08X", (i > 0 ? " " : ""), CodeDWord);
      }

      DisasmStream.flush();
      DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size());
    }
  }
}

R600MCInstLower::R600MCInstLower(MCContext &Ctx, const R600Subtarget &ST,
                                 const AsmPrinter &AP) :
  AMDGPUMCInstLower(Ctx, ST, AP) { }

/// R600 lowering is a direct 1:1 copy: same opcode, with each explicit
/// operand lowered through the shared lowerOperand.
void R600MCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());
  for (const MachineOperand &MO : MI->explicit_operands()) {
    MCOperand MCOp;
    lowerOperand(MO, MCOp);
    OutMI.addOperand(MCOp);
  }
}

/// Emit one R600 MachineInstr: verify it, recurse into bundles, otherwise
/// lower and stream it.
void R600AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  const R600Subtarget &STI = MF->getSubtarget<R600Subtarget>();
  R600MCInstLower MCInstLowering(OutContext, STI, *this);

  StringRef Err;
  if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
    // Report the broken instruction and dump it for diagnosis.
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("Illegal instruction detected: " + Err);
    MI->print(errs());
  }

  if (MI->isBundle()) {
    // Emit each instruction inside the bundle individually.
    const MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
    while (I != MBB->instr_end() && I->isInsideBundle()) {
      EmitInstruction(&*I);
      ++I;
    }
  } else {
    MCInst TmpInst;
    MCInstLowering.lower(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);
  }
}

/// Same addrspacecast-of-null special case as the GCN printer.
const MCExpr *R600AsmPrinter::lowerConstant(const Constant *CV) {
  if (const MCExpr *E = lowerAddrSpaceCast(TM, CV, OutContext))
    return E;
  return AsmPrinter::lowerConstant(CV);
}