//===- AMDGPUMCInstLower.cpp - Lower AMDGPU MachineInstr to an MCInst -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Code to lower AMDGPU MachineInstrs to their corresponding MCInst.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "R600AsmPrinter.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include <algorithm>

using namespace llvm;

namespace {

class AMDGPUMCInstLower {
  MCContext &Ctx;
  const TargetSubtargetInfo &ST;
  const AsmPrinter &AP;

  const MCExpr *getLongBranchBlockExpr(const MachineBasicBlock &SrcBB,
                                       const MachineOperand &MO) const;

public:
  AMDGPUMCInstLower(MCContext &ctx, const TargetSubtargetInfo &ST,
                    const AsmPrinter &AP);

  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const;

  /// Lower a MachineInstr to an MCInst
  void lower(const MachineInstr *MI, MCInst &OutMI) const;
};

class R600MCInstLower : public AMDGPUMCInstLower {
public:
  R600MCInstLower(MCContext &ctx, const R600Subtarget &ST,
                  const AsmPrinter &AP);

  /// Lower a MachineInstr to an MCInst
  void lower(const MachineInstr *MI, MCInst &OutMI) const;
};

} // End anonymous namespace

#include "AMDGPUGenMCPseudoLowering.inc"

AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx,
                                     const TargetSubtargetInfo &st,
                                     const AsmPrinter &ap):
  Ctx(ctx), ST(st), AP(ap) { }

static MCSymbolRefExpr::VariantKind getVariantKind(unsigned MOFlags) {
  switch (MOFlags) {
  default:
    return MCSymbolRefExpr::VK_None;
  case SIInstrInfo::MO_GOTPCREL:
    return MCSymbolRefExpr::VK_GOTPCREL;
  case SIInstrInfo::MO_GOTPCREL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_LO;
  case SIInstrInfo::MO_GOTPCREL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_HI;
  case SIInstrInfo::MO_REL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_LO;
  case SIInstrInfo::MO_REL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_HI;
  }
}

const MCExpr *AMDGPUMCInstLower::getLongBranchBlockExpr(
  const MachineBasicBlock &SrcBB,
  const MachineOperand &MO) const {
  const MCExpr *DestBBSym
    = MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx);
  const MCExpr *SrcBBSym = MCSymbolRefExpr::create(SrcBB.getSymbol(), Ctx);

  assert(SrcBB.front().getOpcode() == AMDGPU::S_GETPC_B64 &&
         ST.getInstrInfo()->get(AMDGPU::S_GETPC_B64).Size == 4);

  // s_getpc_b64 returns the address of the next instruction.
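  // Since s_getpc_b64 is the first instruction of SrcBB (asserted above) and
  // is itself 4 bytes wide, the PC value it materializes equals the block's
  // start symbol plus 4; fold that offset into the source symbol so the
  // branch distance below is computed from the actual runtime PC.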
  const MCConstantExpr *Four = MCConstantExpr::create(4, Ctx);
  SrcBBSym = MCBinaryExpr::createAdd(SrcBBSym, Four, Ctx);

  if (MO.getTargetFlags() == AMDGPU::TF_LONG_BRANCH_FORWARD)
    return MCBinaryExpr::createSub(DestBBSym, SrcBBSym, Ctx);

  assert(MO.getTargetFlags() == AMDGPU::TF_LONG_BRANCH_BACKWARD);
  return MCBinaryExpr::createSub(SrcBBSym, DestBBSym, Ctx);
}

bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    return true;
  case MachineOperand::MO_Register:
    MCOp = MCOperand::createReg(AMDGPU::getMCReg(MO.getReg(), ST));
    return true;
  case MachineOperand::MO_MachineBasicBlock: {
    if (MO.getTargetFlags() != 0) {
      MCOp = MCOperand::createExpr(
        getLongBranchBlockExpr(*MO.getParent()->getParent(), MO));
    } else {
      MCOp = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    }

    return true;
  }
  case MachineOperand::MO_GlobalAddress: {
    const GlobalValue *GV = MO.getGlobal();
    SmallString<128> SymbolName;
    AP.getNameWithPrefix(SymbolName, GV);
    MCSymbol *Sym = Ctx.getOrCreateSymbol(SymbolName);
    const MCExpr *SymExpr =
      MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()), Ctx);
    const MCExpr *Expr = MCBinaryExpr::createAdd(SymExpr,
      MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_ExternalSymbol: {
    MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
    Sym->setExternal(true);
    const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  }
}

void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
  unsigned Opcode = MI->getOpcode();
  const auto *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());

  // FIXME: Should be able to handle this with emitPseudoExpansionLowering. We
  // need to select it to the subtarget specific version, and there's no way to
  // do that with a single pseudo source operation.
  if (Opcode == AMDGPU::S_SETPC_B64_return)
    Opcode = AMDGPU::S_SETPC_B64;
  else if (Opcode == AMDGPU::SI_CALL) {
    // SI_CALL is just S_SWAPPC_B64 with an additional operand to track the
    // called function (which we need to remove here).
    OutMI.setOpcode(TII->pseudoToMCOpcode(AMDGPU::S_SWAPPC_B64));
    MCOperand Dest, Src;
    lowerOperand(MI->getOperand(0), Dest);
    lowerOperand(MI->getOperand(1), Src);
    OutMI.addOperand(Dest);
    OutMI.addOperand(Src);
    return;
  } else if (Opcode == AMDGPU::SI_TCRETURN) {
    // TODO: How to use branch immediate and avoid register+add?
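    // For now the tail call is emitted as an indirect branch: s_setpc_b64
    // jumps to the callee address already materialized in the source
    // register pair.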
    Opcode = AMDGPU::S_SETPC_B64;
  }

  int MCOpcode = TII->pseudoToMCOpcode(Opcode);
  if (MCOpcode == -1) {
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
                "a target-specific version: " + Twine(MI->getOpcode()));
  }

  OutMI.setOpcode(MCOpcode);

  for (const MachineOperand &MO : MI->explicit_operands()) {
    MCOperand MCOp;
    lowerOperand(MO, MCOp);
    OutMI.addOperand(MCOp);
  }
}

bool AMDGPUAsmPrinter::lowerOperand(const MachineOperand &MO,
                                    MCOperand &MCOp) const {
  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);
  return MCInstLowering.lowerOperand(MO, MCOp);
}

static const MCExpr *lowerAddrSpaceCast(const TargetMachine &TM,
                                        const Constant *CV,
                                        MCContext &OutContext) {
  // TargetMachine does not support llvm-style cast. Use C++-style cast.
  // This is safe since TM is always of type AMDGPUTargetMachine or its
  // derived class.
  auto &AT = static_cast<const AMDGPUTargetMachine&>(TM);
  auto *CE = dyn_cast<ConstantExpr>(CV);

  // Lower null pointers in private and local address space.
  // Clang generates addrspacecast for null pointers in private and local
  // address space, which needs to be lowered.
  if (CE && CE->getOpcode() == Instruction::AddrSpaceCast) {
    auto Op = CE->getOperand(0);
    auto SrcAddr = Op->getType()->getPointerAddressSpace();
    if (Op->isNullValue() && AT.getNullPointerValue(SrcAddr) == 0) {
      auto DstAddr = CE->getType()->getPointerAddressSpace();
      return MCConstantExpr::create(AT.getNullPointerValue(DstAddr),
                                    OutContext);
    }
  }
  return nullptr;
}

const MCExpr *AMDGPUAsmPrinter::lowerConstant(const Constant *CV) {
  if (const MCExpr *E = lowerAddrSpaceCast(TM, CV, OutContext))
    return E;
  return AsmPrinter::lowerConstant(CV);
}

void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
  if (emitPseudoExpansionLowering(*OutStreamer, MI))
    return;

  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);

  StringRef Err;
  if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("Illegal instruction detected: " + Err);
    MI->print(errs());
  }

  if (MI->isBundle()) {
    const MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
    while (I != MBB->instr_end() && I->isInsideBundle()) {
      EmitInstruction(&*I);
      ++I;
    }
  } else {
    // We don't want SI_MASK_BRANCH/SI_RETURN_TO_EPILOG encoded. They are
    // placeholder terminator instructions and should only be printed as
    // comments.
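    // In verbose mode each placeholder below is echoed as a raw asm comment
    // so it stays visible in the output without being encoded.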
    if (MI->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      if (isVerbose()) {
        SmallVector<char, 16> BBStr;
        raw_svector_ostream Str(BBStr);

        const MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
        const MCSymbolRefExpr *Expr
          = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext);
        Expr->print(Str, MAI);
        OutStreamer->emitRawComment(Twine(" mask branch ") + BBStr);
      }

      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) {
      if (isVerbose())
        OutStreamer->emitRawComment(" return to shader part epilog");
      return;
    }

    if (MI->getOpcode() == AMDGPU::WAVE_BARRIER) {
      if (isVerbose())
        OutStreamer->emitRawComment(" wave barrier");
      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_MASKED_UNREACHABLE) {
      if (isVerbose())
        OutStreamer->emitRawComment(" divergent unreachable");
      return;
    }

    MCInst TmpInst;
    MCInstLowering.lower(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);

#ifdef EXPENSIVE_CHECKS
    // Sanity-check getInstSizeInBytes on explicitly specified CPUs (it cannot
    // work correctly for the generic CPU).
    //
    // The isPseudo check really shouldn't be here, but unfortunately there are
    // some negative lit tests that depend on being able to continue through
    // here even when pseudo instructions haven't been lowered.
    if (!MI->isPseudo() && STI.isCPUStringValid(STI.getCPU())) {
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      std::unique_ptr<MCCodeEmitter> InstEmitter(createSIMCCodeEmitter(
        *STI.getInstrInfo(), *OutContext.getRegisterInfo(), OutContext));
      InstEmitter->encodeInstruction(TmpInst, CodeStream, Fixups, STI);

      assert(CodeBytes.size() == STI.getInstrInfo()->getInstSizeInBytes(*MI));
    }
#endif

    if (STI.dumpCode()) {
      // Disassemble instruction/operands to text.
      DisasmLines.resize(DisasmLines.size() + 1);
      std::string &DisasmLine = DisasmLines.back();
      raw_string_ostream DisasmStream(DisasmLine);

      AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(),
                                    *STI.getInstrInfo(),
                                    *STI.getRegisterInfo());
      InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(), STI);

      // Disassemble instruction/operands to hex representation.
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      auto &ObjStreamer = static_cast<MCObjectStreamer&>(*OutStreamer);
      MCCodeEmitter &InstEmitter = ObjStreamer.getAssembler().getEmitter();
      InstEmitter.encodeInstruction(TmpInst, CodeStream, Fixups,
                                    MF->getSubtarget<MCSubtargetInfo>());
      HexLines.resize(HexLines.size() + 1);
      std::string &HexLine = HexLines.back();
      raw_string_ostream HexStream(HexLine);

      for (size_t i = 0; i < CodeBytes.size(); i += 4) {
        unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
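        // Print one 32-bit instruction word per iteration, space-separated.
        // Note: the cast above reads the dword in host byte order.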
" " : ""), CodeDWord); 351 } 352 353 DisasmStream.flush(); 354 DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size()); 355 } 356 } 357 } 358 359 R600MCInstLower::R600MCInstLower(MCContext &Ctx, const R600Subtarget &ST, 360 const AsmPrinter &AP) : 361 AMDGPUMCInstLower(Ctx, ST, AP) { } 362 363 void R600MCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { 364 OutMI.setOpcode(MI->getOpcode()); 365 for (const MachineOperand &MO : MI->explicit_operands()) { 366 MCOperand MCOp; 367 lowerOperand(MO, MCOp); 368 OutMI.addOperand(MCOp); 369 } 370 } 371 372 void R600AsmPrinter::EmitInstruction(const MachineInstr *MI) { 373 const R600Subtarget &STI = MF->getSubtarget<R600Subtarget>(); 374 R600MCInstLower MCInstLowering(OutContext, STI, *this); 375 376 StringRef Err; 377 if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) { 378 LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext(); 379 C.emitError("Illegal instruction detected: " + Err); 380 MI->print(errs()); 381 } 382 383 if (MI->isBundle()) { 384 const MachineBasicBlock *MBB = MI->getParent(); 385 MachineBasicBlock::const_instr_iterator I = ++MI->getIterator(); 386 while (I != MBB->instr_end() && I->isInsideBundle()) { 387 EmitInstruction(&*I); 388 ++I; 389 } 390 } else { 391 MCInst TmpInst; 392 MCInstLowering.lower(MI, TmpInst); 393 EmitToStreamer(*OutStreamer, TmpInst); 394 } 395 } 396 397 const MCExpr *R600AsmPrinter::lowerConstant(const Constant *CV) { 398 if (const MCExpr *E = lowerAddrSpaceCast(TM, CV, OutContext)) 399 return E; 400 return AsmPrinter::lowerConstant(CV); 401 } 402