//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// Encode an fp or int literal
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
                  MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(mri) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  unsigned getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const override;

  unsigned getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const override;

  unsigned getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const override;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           const MCRegisterInfo &MRI,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, MRI, Ctx);
}
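
// Summary of the operand encodings produced by the literal helpers below
// (a reader's sketch derived from those helpers; the ISA documentation is
// authoritative):
//   128 + V   integer inline constant for V in [0, 64]
//   192 + |V| integer inline constant for V in [-16, -1]
//   240..247  floating-point inline constants +/-0.5, +/-1.0, +/-2.0, +/-4.0
//   248       1.0 / (2.0 * pi), only when FeatureInv2PiInlineImm is present
//   255       not inlinable; encodeInstruction() emits the value as a
//             trailing 32-bit literal dword instead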

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}
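
// Illustrative inputs for getLit32Encoding, following the rules above
// (hypothetical sample values, not an exhaustive list):
//   getLit32Encoding(0x00000001, STI) -> 129   (integer  1: 128 + 1)
//   getLit32Encoding(0xFFFFFFFF, STI) -> 193   (integer -1: 192 + 1)
//   getLit32Encoding(0x3F800000, STI) -> 242   (bit pattern of 1.0f)
//   getLit32Encoding(0x12345678, STI) -> 255   (no inline form; needs a literal)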

uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {

    assert(!MO.isFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
    if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    LLVM_FALLTHROUGH;
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    uint16_t Lo16 = static_cast<uint16_t>(Imm);
    uint32_t Encoding = getLit16Encoding(Lo16, STI);
    return Encoding;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
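
// Sketch of the byte layout written by encodeInstruction() below, inferred
// from the code itself:
//   [base encoding: Desc.getSize() bytes, little-endian]
//   [GFX10 NSA MIMG only: one byte per extra VADDR register, zero-padded to a
//    multiple of 4 bytes, i.e. NumPadding = (-NumExtraAddrs) & 3; e.g. two
//    extra addresses are followed by two padding bytes]
//   [at most one trailing 32-bit literal dword, shared by all src operands
//    whose getLitEncoding() is 255]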

void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i)
      OS.write((uint8_t)getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i),
                                          Fixups, STI));
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }

  if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
      (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
    return;

  // Check for additional literals in SRC0/1/2 (Op 1/2/3)
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();

    } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed
    break;
  }
}

unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    return 0;
  }

  return getMachineOpValue(MI, MO, Fixups, STI);
}

unsigned
SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    return RegEnc;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && Enc != 255) {
      return Enc | SDWA9EncValues::SRC_SGPR_MASK;
    }
  }

  llvm_unreachable("Unsupported operand kind");
  return 0;
}

unsigned
SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  return RegEnc;
}
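
// Notes on the SDWA encodings above, as implemented here (a summary of this
// code, not a specification):
//  * getSDWASrcEncoding: a VGPR source keeps only its register number
//    (SRC_VGPR_MASK); SGPR sources and inline constants additionally set
//    SRC_SGPR_MASK to mark the field as not being a plain VGPR.
//  * getSDWAVopcDstEncoding: VCC (or VCC_LO) is encoded as 0; any other SGPR
//    destination sets VOPC_DST_VCC_MASK and carries its register encoding in
//    the VOPC_DST_SGPR_MASK bits.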

unsigned
SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  uint64_t Enc = MRI.getEncodingValue(Reg);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg))
    Enc |= 512;

  return Enc;
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return MRI.getEncodingValue(MO.getReg());

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel or not should not depend on what
    // the expression looks like. Given that this is just a general expression,
    // it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16
    //
    // And expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //   s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(
      MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  // Figure out the operand number, needed for isSrcOperand check
  unsigned OpNo = 0;
  for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
    if (&MO == &MI.getOperand(OpNo))
      break;
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U &&
        (Enc != 255 || Desc.getSize() == 4 || Desc.getSize() == 8))
      return Enc;

  } else if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Encoding of this operand type is not supported yet.");
  return 0;
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AMDGPUGenMCCodeEmitter.inc"