//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AMDGPUMCObjectWriter : public MCObjectWriter {
public:
  AMDGPUMCObjectWriter(raw_pwrite_stream &OS) : MCObjectWriter(OS, true) {}
  void executePostLayoutBinding(MCAssembler &Asm,
                                const MCAsmLayout &Layout) override {
    //XXX: Implement if necessary.
  }
  void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
                        const MCFragment *Fragment, const MCFixup &Fixup,
                        MCValue Target, bool &IsPCRel,
                        uint64_t &FixedValue) override {
    assert(!"Not implemented");
  }

  void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
};

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T)
    : MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }
  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
    assert(!"Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

void AMDGPUMCObjectWriter::writeObject(MCAssembler &Asm,
                                       const MCAsmLayout &Layout) {
  for (MCAssembler::iterator I = Asm.begin(), E = Asm.end(); I != E; ++I) {
    Asm.writeSectionData(&*I, Layout);
  }
}

static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                                  unsigned DataSize, uint64_t Value,
                                  bool IsPCRel) const {

  switch ((unsigned)Fixup.getKind()) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = ((int64_t)Value - 4) / 4;
    if (!isInt<16>(BrImm))
      report_fatal_error("branch size exceeds simm16");

    uint16_t *Dst = (uint16_t*)(Data + Fixup.getOffset());
    *Dst = BrImm;
    break;
  }

  default: {
    // FIXME: Copied from AArch64
    unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
    if (!Value)
      return; // Doesn't change encoding.
    MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

    // Shift the value into position.
    Value <<= Info.TargetOffset;

    unsigned Offset = Fixup.getOffset();
    assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

    // For each byte of the fragment that the fixup touches, mask in the
    // bits from the fixup value.
    for (unsigned i = 0; i != NumBytes; ++i)
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
  }
}

const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
                                                       MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  OW->WriteZeros(Count);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA) { }

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           const Triple &TT, StringRef CPU) {
  // Use 64-bit ELF for amdgcn
  return new ELFAMDGPUAsmBackend(T, TT);
}