//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// Encode an fp or int literal
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
                  MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(mri) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  unsigned getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const override;

  unsigned getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const override;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           const MCRegisterInfo &MRI,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, MRI, Ctx);
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
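// Integer inline immediates occupy two encoding ranges: values 0..64 map to
// encodings 128..192, and values -1..-16 map to encodings 193..208. A result
// of 0 means the integer has no inline encoding.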
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}
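
// The getLit*Encoding helpers above and getLitEncoding below map an operand
// value onto the SI source-operand encoding space: inline integer constants
// (128..208), inline float constants (240..247), 248 for 1.0/(2.0*pi) on
// subtargets with FeatureInv2PiInlineImm, and 255 when the value is not
// inlinable and must be emitted as a separate 32-bit literal. Operands that
// are neither immediates nor constant expressions map to ~0.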
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {

    assert(!MO.isFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    // FIXME: Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
    if (!isUInt<16>(Imm) && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal])
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    LLVM_FALLTHROUGH;
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    uint16_t Lo16 = static_cast<uint16_t>(Imm);
    uint32_t Encoding = getLit16Encoding(Lo16, STI);
    return Encoding;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}

void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
  }

  // NSA (non-sequential address) encoding: on GFX10, MIMG instructions append
  // their extra VGPR address operands as single bytes, zero-padded to a dword
  // boundary.
  if (AMDGPU::isGFX10(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i)
      OS.write((uint8_t)getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i),
                                          Fixups, STI));
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }

  // A literal can only follow a 4-byte encoding, or an 8-byte encoding on
  // subtargets that support VOP3 literals.
  if ((bytes > 8 && STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) ||
      (bytes > 4 && !STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]))
    return;

  // Check for additional literals in SRC0/1/2 (Op 1/2/3)
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();

    } else // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed
    break;
  }
}
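
// A SOPP branch target that is still a symbolic expression at encoding time
// is emitted as 0, and a fixup_si_sopp_br fixup is recorded so the assembler
// backend can later resolve it to the final simm16 branch offset.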
unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    return 0;
  }

  return getMachineOpValue(MI, MO, Fixups, STI);
}

unsigned
SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    return RegEnc;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && Enc != 255) {
      return Enc | SDWA9EncValues::SRC_SGPR_MASK;
    }
  }

  llvm_unreachable("Unsupported operand kind");
  return 0;
}

unsigned
SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  return RegEnc;
}
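
// Conservatively decide whether an expression needs a PC-relative fixup:
// plain constants, target expressions, AMDGPU ABS32_LO/ABS32_HI symbol
// references, and subtractions (symbol differences) are absolute; any other
// symbol reference is assumed to be PC-relative.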
static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return MRI.getEncodingValue(MO.getReg());

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel or not should not depend on
    // what the expression looks like. Given that this is just a general
    // expression, it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // And expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //   s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(
      MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  // Figure out the operand number, needed for isSrcOperand check
  unsigned OpNo = 0;
  for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
    if (&MO == &MI.getOperand(OpNo))
      break;
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U &&
        (Enc != 255 || Desc.getSize() == 4 || Desc.getSize() == 8))
      return Enc;

  } else if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Encoding of this operand type is not supported yet.");
  return 0;
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AMDGPUGenMCCodeEmitter.inc"