//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }

  unsigned getMinimumNopSize() const override;
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
    // SOPP branch immediates are signed dword offsets, relative to the end of
    // the 4-byte branch instruction, hence the -4 bias and division by 4.
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
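  // Note: every fixup this backend currently handles uses a TargetOffset of 0
  // (the Infos table below and the generic FK_Data_* kinds alike), so this
  // shift is a no-op today; it is kept so the byte-patching loop below would
  // also handle a future fixup that targets a bitfield inside the word.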
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
    MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name               offset bits flags
    { "fixup_si_sopp_br", 0,     16,  MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OW->WriteZeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    OW->write32(Encoded_S_NOP_0);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT)
      : AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
        HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectWriter>
  createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend, OS);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           const Triple &TT, StringRef CPU,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, TT);
}
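
// A minimal sketch of how this factory is typically wired up through the
// TargetRegistry; the real registration lives elsewhere (normally in
// AMDGPUMCTargetDesc.cpp), and the getThe*Target() accessors are assumed
// names shown here for context only:
//
//   extern "C" void LLVMInitializeAMDGPUTargetMC() {
//     for (Target *T : {&getTheAMDGPUTarget(), &getTheGCNTarget()})
//       TargetRegistry::RegisterMCAsmBackend(*T, createAMDGPUAsmBackend);
//   }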