1 //===-- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information--------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 #include "AMDGPUBaseInfo.h"
10 #include "AMDGPU.h"
11 #include "SIDefines.h"
12 #include "llvm/IR/LLVMContext.h"
13 #include "llvm/IR/Function.h"
14 #include "llvm/IR/GlobalValue.h"
15 #include "llvm/MC/MCContext.h"
16 #include "llvm/MC/MCInstrInfo.h"
17 #include "llvm/MC/MCRegisterInfo.h"
18 #include "llvm/MC/MCSectionELF.h"
19 #include "llvm/MC/MCSubtargetInfo.h"
20 #include "llvm/MC/SubtargetFeature.h"
21 
22 #define GET_SUBTARGETINFO_ENUM
23 #include "AMDGPUGenSubtargetInfo.inc"
24 #undef GET_SUBTARGETINFO_ENUM
25 
26 #define GET_REGINFO_ENUM
27 #include "AMDGPUGenRegisterInfo.inc"
28 #undef GET_REGINFO_ENUM
29 
30 #define GET_INSTRINFO_NAMED_OPS
31 #define GET_INSTRINFO_ENUM
32 #include "AMDGPUGenInstrInfo.inc"
33 #undef GET_INSTRINFO_NAMED_OPS
34 #undef GET_INSTRINFO_ENUM
35 
36 namespace llvm {
37 namespace AMDGPU {
38 
39 IsaVersion getIsaVersion(const FeatureBitset &Features) {
40 
41   if (Features.test(FeatureISAVersion7_0_0))
42     return {7, 0, 0};
43 
44   if (Features.test(FeatureISAVersion7_0_1))
45     return {7, 0, 1};
46 
47   if (Features.test(FeatureISAVersion8_0_0))
48     return {8, 0, 0};
49 
50   if (Features.test(FeatureISAVersion8_0_1))
51     return {8, 0, 1};
52 
53   if (Features.test(FeatureISAVersion8_0_3))
54     return {8, 0, 3};
55 
56   return {0, 0, 0};
57 }
58 
59 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
60                                const FeatureBitset &Features) {
61 
62   IsaVersion ISA = getIsaVersion(Features);
63 
64   memset(&Header, 0, sizeof(Header));
65 
66   Header.amd_kernel_code_version_major = 1;
67   Header.amd_kernel_code_version_minor = 0;
68   Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
69   Header.amd_machine_version_major = ISA.Major;
70   Header.amd_machine_version_minor = ISA.Minor;
71   Header.amd_machine_version_stepping = ISA.Stepping;
72   Header.kernel_code_entry_byte_offset = sizeof(Header);
73   // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
74   Header.wavefront_size = 6;
75   // These alignment values are specified in powers of two, so alignment =
76   // 2^n.  The minimum alignment is 2^4 = 16.
77   Header.kernarg_segment_alignment = 4;
78   Header.group_segment_alignment = 4;
79   Header.private_segment_alignment = 4;
80 }
81 
82 MCSection *getHSATextSection(MCContext &Ctx) {
83   return Ctx.getELFSection(".hsatext", ELF::SHT_PROGBITS,
84                            ELF::SHF_ALLOC | ELF::SHF_WRITE |
85                            ELF::SHF_EXECINSTR |
86                            ELF::SHF_AMDGPU_HSA_AGENT |
87                            ELF::SHF_AMDGPU_HSA_CODE);
88 }
89 
90 MCSection *getHSADataGlobalAgentSection(MCContext &Ctx) {
91   return Ctx.getELFSection(".hsadata_global_agent", ELF::SHT_PROGBITS,
92                            ELF::SHF_ALLOC | ELF::SHF_WRITE |
93                            ELF::SHF_AMDGPU_HSA_GLOBAL |
94                            ELF::SHF_AMDGPU_HSA_AGENT);
95 }
96 
97 MCSection *getHSADataGlobalProgramSection(MCContext &Ctx) {
98   return  Ctx.getELFSection(".hsadata_global_program", ELF::SHT_PROGBITS,
99                             ELF::SHF_ALLOC | ELF::SHF_WRITE |
100                             ELF::SHF_AMDGPU_HSA_GLOBAL);
101 }
102 
103 MCSection *getHSARodataReadonlyAgentSection(MCContext &Ctx) {
104   return Ctx.getELFSection(".hsarodata_readonly_agent", ELF::SHT_PROGBITS,
105                            ELF::SHF_ALLOC | ELF::SHF_AMDGPU_HSA_READONLY |
106                            ELF::SHF_AMDGPU_HSA_AGENT);
107 }
108 
109 bool isGroupSegment(const GlobalValue *GV) {
110   return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
111 }
112 
113 bool isGlobalSegment(const GlobalValue *GV) {
114   return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
115 }
116 
117 bool isReadOnlySegment(const GlobalValue *GV) {
118   return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
119 }
120 
121 int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
122   Attribute A = F.getFnAttribute(Name);
123   int Result = Default;
124 
125   if (A.isStringAttribute()) {
126     StringRef Str = A.getValueAsString();
127     if (Str.getAsInteger(0, Result)) {
128       LLVMContext &Ctx = F.getContext();
129       Ctx.emitError("can't parse integer attribute " + Name);
130     }
131   }
132 
133   return Result;
134 }
135 
/// Parse the string function attribute \p Name as a comma-separated pair
/// of integers, e.g. "128,2".
///
/// Returns \p Default when the attribute is absent or malformed.  When
/// \p OnlyFirstRequired is true and the second value is missing (empty
/// after the comma split), the first parsed value is paired with
/// \p Default.second instead of raising an error.  Parse failures are
/// reported through LLVMContext::emitError.
std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  // split(',') yields {whole string, ""} when there is no comma.
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    // An empty second field only fails when both values are required;
    // otherwise Ints.second keeps Default.second.
    if (!OnlyFirstRequired || Strs.second.trim().size()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
160 
161 unsigned getVmcntMask(IsaVersion Version) {
162   return 0xf;
163 }
164 
// Bit offset of the vmcnt field.  \p Version is currently unused.
unsigned getVmcntShift(IsaVersion Version) {
  return 0;
}
168 
169 unsigned getExpcntMask(IsaVersion Version) {
170   return 0x7;
171 }
172 
// Bit offset of the expcnt field.  \p Version is currently unused.
unsigned getExpcntShift(IsaVersion Version) {
  return 4;
}
176 
177 unsigned getLgkmcntMask(IsaVersion Version) {
178   return 0xf;
179 }
180 
// Bit offset of the lgkmcnt field.  \p Version is currently unused.
unsigned getLgkmcntShift(IsaVersion Version) {
  return 8;
}
184 
// Read the "InitialPSInputAddr" function attribute; 0 when the attribute
// is absent.
unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}
188 
189 bool isShader(CallingConv::ID cc) {
190   switch(cc) {
191     case CallingConv::AMDGPU_VS:
192     case CallingConv::AMDGPU_GS:
193     case CallingConv::AMDGPU_PS:
194     case CallingConv::AMDGPU_CS:
195       return true;
196     default:
197       return false;
198   }
199 }
200 
201 bool isCompute(CallingConv::ID cc) {
202   return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
203 }
204 
205 bool isSI(const MCSubtargetInfo &STI) {
206   return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
207 }
208 
209 bool isCI(const MCSubtargetInfo &STI) {
210   return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
211 }
212 
213 bool isVI(const MCSubtargetInfo &STI) {
214   return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
215 }
216 
217 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
218 
219   switch(Reg) {
220   default: break;
221   case AMDGPU::FLAT_SCR:
222     assert(!isSI(STI));
223     return isCI(STI) ? AMDGPU::FLAT_SCR_ci : AMDGPU::FLAT_SCR_vi;
224 
225   case AMDGPU::FLAT_SCR_LO:
226     assert(!isSI(STI));
227     return isCI(STI) ? AMDGPU::FLAT_SCR_LO_ci : AMDGPU::FLAT_SCR_LO_vi;
228 
229   case AMDGPU::FLAT_SCR_HI:
230     assert(!isSI(STI));
231     return isCI(STI) ? AMDGPU::FLAT_SCR_HI_ci : AMDGPU::FLAT_SCR_HI_vi;
232   }
233   return Reg;
234 }
235 
236 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
237   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
238 
239   return OpType == AMDGPU::OPERAND_REG_IMM32_INT ||
240          OpType == AMDGPU::OPERAND_REG_IMM32_FP ||
241          OpType == AMDGPU::OPERAND_REG_INLINE_C_INT ||
242          OpType == AMDGPU::OPERAND_REG_INLINE_C_FP;
243 }
244 
245 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
246   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
247 
248   return OpType == AMDGPU::OPERAND_REG_IMM32_FP ||
249          OpType == AMDGPU::OPERAND_REG_INLINE_C_FP;
250 }
251 
252 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
253   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
254 
255   return OpType == AMDGPU::OPERAND_REG_INLINE_C_INT ||
256          OpType == AMDGPU::OPERAND_REG_INLINE_C_FP;
257 }
258 
259 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
260                            unsigned OpNo) {
261   int RCID = Desc.OpInfo[OpNo].RegClass;
262   const MCRegisterClass &RC = MRI->getRegClass(RCID);
263   return RC.getSize();
264 }
265 
266 bool isInlinableLiteral64(int64_t Literal, bool IsVI) {
267   if (Literal >= -16 && Literal <= 64)
268     return true;
269 
270   double D = BitsToDouble(Literal);
271 
272   if (D == 0.5 || D == -0.5 ||
273       D == 1.0 || D == -1.0 ||
274       D == 2.0 || D == -2.0 ||
275       D == 4.0 || D == -4.0)
276     return true;
277 
278   if (IsVI && Literal == 0x3fc45f306dc9c882)
279     return true;
280 
281   return false;
282 }
283 
284 bool isInlinableLiteral32(int32_t Literal, bool IsVI) {
285   if (Literal >= -16 && Literal <= 64)
286     return true;
287 
288   float F = BitsToFloat(Literal);
289 
290   if (F == 0.5 || F == -0.5 ||
291       F == 1.0 || F == -1.0 ||
292       F == 2.0 || F == -2.0 ||
293       F == 4.0 || F == -4.0)
294     return true;
295 
296   if (IsVI && Literal == 0x3e22f983)
297     return true;
298 
299   return false;
300 }
301 
302 
303 } // End namespace AMDGPU
304 } // End namespace llvm
305