//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

namespace {

class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
  const MCRegisterInfo &MRI;

  /// \brief Encode an fp or int literal.
  uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
                          const MCSubtargetInfo &STI) const;

public:
  SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
                  MCContext &ctx)
      : AMDGPUMCCodeEmitter(mcii), MRI(mri) {}
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;

  /// \brief Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  /// \returns the encoding for an MCOperand.
  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;

  /// \brief Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const override;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           const MCRegisterInfo &MRI,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, MRI, Ctx);
}
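// A rough summary of the immediate encodings produced by the helpers below,
// reconstructed from their logic rather than quoted from the ISA manual:
//   128..192 : inline integer constants 0..64
//   193..208 : inline integer constants -1..-16
//   240..247 : inline +/-0.5, +/-1.0, +/-2.0, +/-4.0
//   248      : 1/(2*pi), only when FeatureInv2PiInlineImm is available
//   255      : no inline form; encodeInstruction() emits the value as a
//              trailing 32-bit literal dword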
// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == FloatToBits(0.5f))
    return 240;

  if (Val == FloatToBits(-0.5f))
    return 241;

  if (Val == FloatToBits(1.0f))
    return 242;

  if (Val == FloatToBits(-1.0f))
    return 243;

  if (Val == FloatToBits(2.0f))
    return 244;

  if (Val == FloatToBits(-2.0f))
    return 245;

  if (Val == FloatToBits(4.0f))
    return 246;

  if (Val == FloatToBits(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == DoubleToBits(0.5))
    return 240;

  if (Val == DoubleToBits(-0.5))
    return 241;

  if (Val == DoubleToBits(1.0))
    return 242;

  if (Val == DoubleToBits(-1.0))
    return 243;

  if (Val == DoubleToBits(2.0))
    return 244;

  if (Val == DoubleToBits(-2.0))
    return 245;

  if (Val == DoubleToBits(4.0))
    return 246;

  if (Val == DoubleToBits(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    return 248;

  return 255;
}

uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isFPImm());

    if (!MO.isImm())
      return ~0;

    Imm = MO.getImm();
  }

  switch (AMDGPU::getOperandSize(OpInfo)) {
  case 4:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
  case 8:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
  case 2:
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  default:
    llvm_unreachable("invalid operand size");
  }
}
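// Worked example (an illustration of the mapping above, not wording from the
// ISA documentation): a 32-bit source operand holding the bit pattern of 1.0f
// is encoded as the single inline value 242, whereas an arbitrary constant
// such as 0x12345678 yields 255, and encodeInstruction() below then appends
// the raw 32-bit value after the instruction word.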
void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
  }

  if (bytes > 4)
    return;

  // Check for additional literals in SRC0/1/2 (Op 1/2/3)
  for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();

    } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed
    break;
  }
}

unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    return 0;
  }

  return getMachineOpValue(MI, MO, Fixups, STI);
}

uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return MRI.getEncodingValue(MO.getReg());

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    const auto *Expr = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
    MCFixupKind Kind;
    if (Expr && Expr->getSymbol().isExternal())
      Kind = FK_Data_4;
    else
      Kind = FK_PCRel_4;
    Fixups.push_back(MCFixup::create(4, MO.getExpr(), Kind, MI.getLoc()));
  }

  // Figure out the operand number, needed for isSrcOperand check
  unsigned OpNo = 0;
  for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
    if (&MO == &MI.getOperand(OpNo))
      break;
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
    if (Enc != ~0U && (Enc != 255 || Desc.getSize() == 4))
      return Enc;

  } else if (MO.isImm())
    return MO.getImm();

  llvm_unreachable("Encoding of this operand type is not supported yet.");
  return 0;
}