//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
                         const MCFixup &Fixup, const MCFragment *DF,
                         const MCValue &Target, uint64_t &Value,
                         bool &IsResolved) override;

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }

  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (Fixup.getKind()) {
  case AMDGPU::fixup_si_sopp_br: {
    // The SOPP branch immediate is a signed dword offset relative to the
    // instruction following the 4-byte branch, hence the -4 and /4.
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::processFixupValue(const MCAssembler &Asm,
                                         const MCAsmLayout &Layout,
                                         const MCFixup &Fixup,
                                         const MCFragment *DF,
                                         const MCValue &Target, uint64_t &Value,
                                         bool &IsResolved) {
  if (IsResolved)
    Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
}

void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                  unsigned DataSize, uint64_t Value,
                                  bool IsPCRel) const {
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
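  // (Per MCFixupKindInfo, TargetOffset is the bit offset within the
  // instruction at which the fixup value is written; it is 0 for
  // fixup_si_sopp_br, so the shift is a no-op for that kind.)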
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &
AMDGPUAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  // This table is indexed by Kind - FirstTargetFixupKind, so it must stay in
  // the same order as the fixup kinds declared in AMDGPUFixupKinds.h.
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                offset bits flags
    { "fixup_si_sopp_br",  0,     16,  MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  OW->WriteZeros(Count);
  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT)
      : AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
        HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {}

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           const Triple &TT, StringRef CPU,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, TT);
}
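
// For reference, a minimal sketch of how this factory is typically hooked up
// to the TargetRegistry. In-tree this happens in AMDGPUMCTargetDesc.cpp; the
// exact initializer shown here is an assumption, not code from this file:
//
//   extern "C" void LLVMInitializeAMDGPUTargetMC() {
//     for (Target *T : {&TheAMDGPUTarget, &TheGCNTarget})
//       TargetRegistry::RegisterMCAsmBackend(*T, createAMDGPUAsmBackend);
//   }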