//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUTargetTransformInfo.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// \brief Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  // Clear the destination field, then merge in the (masked) source bits.
  Dst &= ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// \brief Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
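
// Worked example (for illustration): with Shift = 4 and Width = 3,
// getBitMask(4, 3) == 0x70, packBits(5, 0, 4, 3) places 5 in bits [6:4]
// giving 0x50, and unpackBits(0x50, 4, 3) recovers 5.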

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }
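
// Taken together, the getters above describe the waitcnt bit layout used
// below: bits [3:0] hold the low vmcnt bits, bits [6:4] expcnt, bits [11:8]
// lgkmcnt, and, on targets with the extended vmcnt (gfx9 onwards), bits
// [15:14] hold vmcnt's two high bits.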

} // end anonymous namespace

namespace llvm {

static cl::opt<bool> EnablePackedInlinableLiterals(
    "enable-packed-inlinable-literals",
    cl::desc("Enable packed inlinable literals (v2f16, v2i16)"),
    cl::init(false));

namespace AMDGPU {

LLVM_READNONE
static inline Channels indexToChannel(unsigned Channel) {
  switch (Channel) {
  case 1:
    return AMDGPU::Channels_1;
  case 2:
    return AMDGPU::Channels_2;
  case 3:
    return AMDGPU::Channels_3;
  case 4:
    return AMDGPU::Channels_4;
  default:
    llvm_unreachable("invalid MIMG channel");
  }
}

// FIXME: Need to handle d16 images correctly.
static unsigned rcToChannels(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::VGPR_32RegClassID:
    return 1;
  case AMDGPU::VReg_64RegClassID:
    return 2;
  case AMDGPU::VReg_96RegClassID:
    return 3;
  case AMDGPU::VReg_128RegClassID:
    return 4;
  default:
    llvm_unreachable("invalid MIMG register class");
  }
}

int getMaskedMIMGOp(const MCInstrInfo &MII, unsigned Opc,
                    unsigned NewChannels) {
  AMDGPU::Channels Channel = AMDGPU::indexToChannel(NewChannels);
  unsigned OrigChannels = rcToChannels(MII.get(Opc).OpInfo[0].RegClass);
  if (NewChannels == OrigChannels)
    return Opc;

  switch (OrigChannels) {
  case 1:
    return AMDGPU::getMaskedMIMGOp1(Opc, Channel);
  case 2:
    return AMDGPU::getMaskedMIMGOp2(Opc, Channel);
  case 3:
    return AMDGPU::getMaskedMIMGOp3(Opc, Channel);
  case 4:
    return AMDGPU::getMaskedMIMGOp4(Opc, Channel);
  default:
    llvm_unreachable("invalid MIMG channel");
  }
}

int getMaskedMIMGAtomicOp(const MCInstrInfo &MII, unsigned Opc,
                          unsigned NewChannels) {
  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst) != -1);
  assert(NewChannels == 1 || NewChannels == 2 || NewChannels == 4);

  unsigned OrigChannels = rcToChannels(MII.get(Opc).OpInfo[0].RegClass);
  assert(OrigChannels == 1 || OrigChannels == 2 || OrigChannels == 4);

  if (NewChannels == OrigChannels)
    return Opc;

  if (OrigChannels <= 2 && NewChannels <= 2) {
    // This is an ordinary atomic (not an atomic_cmpswap).
    return (OrigChannels == 1) ?
      AMDGPU::getMIMGAtomicOp1(Opc) : AMDGPU::getMIMGAtomicOp2(Opc);
  } else if (OrigChannels >= 2 && NewChannels >= 2) {
    // This is an atomic_cmpswap.
    return (OrigChannels == 2) ?
      AMDGPU::getMIMGAtomicOp1(Opc) : AMDGPU::getMIMGAtomicOp2(Opc);
  } else {
    // Invalid OrigChannels/NewChannels combination.
    return -1;
  }
}

// Wrapper for Tablegen'd function.  enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

IsaVersion getIsaVersion(const FeatureBitset &Features) {
  // GCN GFX6 (Southern Islands (SI)).
  if (Features.test(FeatureISAVersion6_0_0))
    return {6, 0, 0};
  if (Features.test(FeatureISAVersion6_0_1))
    return {6, 0, 1};

  // GCN GFX7 (Sea Islands (CI)).
  if (Features.test(FeatureISAVersion7_0_0))
    return {7, 0, 0};
  if (Features.test(FeatureISAVersion7_0_1))
    return {7, 0, 1};
  if (Features.test(FeatureISAVersion7_0_2))
    return {7, 0, 2};
  if (Features.test(FeatureISAVersion7_0_3))
    return {7, 0, 3};
  if (Features.test(FeatureISAVersion7_0_4))
    return {7, 0, 4};

  // GCN GFX8 (Volcanic Islands (VI)).
  if (Features.test(FeatureISAVersion8_0_1))
    return {8, 0, 1};
  if (Features.test(FeatureISAVersion8_0_2))
    return {8, 0, 2};
  if (Features.test(FeatureISAVersion8_0_3))
    return {8, 0, 3};
  if (Features.test(FeatureISAVersion8_1_0))
    return {8, 1, 0};

  // GCN GFX9.
  if (Features.test(FeatureISAVersion9_0_0))
    return {9, 0, 0};
  if (Features.test(FeatureISAVersion9_0_2))
    return {9, 0, 2};

  if (!Features.test(FeatureGCN) || Features.test(FeatureSouthernIslands))
    return {0, 0, 0};
  return {7, 0, 0};
}

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto ISAVersion = IsaInfo::getIsaVersion(STI->getFeatureBits());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << ISAVersion.Major
         << ISAVersion.Minor
         << ISAVersion.Stepping;
  Stream.flush();
}
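
// For example, for an amdgcn-amd-amdhsa target at ISA version 9.0.0 this
// streams "amdgcn-amd-amdhsa--gfx900" (the environment component is empty
// there, hence the double dash).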

bool hasCodeObjectV3(const FeatureBitset &Features) {
  return Features.test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const FeatureBitset &Features) {
  if (Features.test(FeatureWavefrontSize16))
    return 16;
  if (Features.test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const FeatureBitset &Features) {
  if (Features.test(FeatureLocalMemorySize32768))
    return 32768;
  if (Features.test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const FeatureBitset &Features) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const FeatureBitset &Features,
                               unsigned FlatWorkGroupSize) {
  if (!Features.test(FeatureGCN))
    return 8;
  unsigned N = getWavesPerWorkGroup(Features, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}
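
// Worked example (for illustration): on a GCN target with 64-wide waves and
// FlatWorkGroupSize = 256, each work-group needs 4 waves, so a CU can hold
// min(40 / 4, 16u) = 10 work-groups.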

unsigned getMaxWavesPerCU(const FeatureBitset &Features) {
  return getMaxWavesPerEU(Features) * getEUsPerCU(Features);
}

unsigned getMaxWavesPerCU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(Features, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxWavesPerEU(const FeatureBitset &Features) {
  if (!Features.test(FeatureGCN))
    return 8;
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(Features, FlatWorkGroupSize),
                 getEUsPerCU(Features)) / getEUsPerCU(Features);
}

unsigned getMinFlatWorkGroupSize(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const FeatureBitset &Features) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const FeatureBitset &Features,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(Features)) /
         getWavefrontSize(Features);
}
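
// E.g. FlatWorkGroupSize = 300 with 64-wide waves rounds up to 320 work-items
// and therefore occupies 5 waves.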

unsigned getSGPRAllocGranule(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const FeatureBitset &Features) {
  return 8;
}

unsigned getTotalNumSGPRs(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const FeatureBitset &Features) {
  if (Features.test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumSGPRs =
      alignDown(getTotalNumSGPRs(Features) / (WavesPerEU + 1),
                getSGPRAllocGranule(Features)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(Features));
}
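
// Worked example (for illustration) for a GFX8 target (800 total SGPRs,
// allocation granule 16): getMinNumSGPRs(Features, 8) ==
// alignDown(800 / 9, 16) + 1 == 81.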

unsigned getMaxNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(Features);
  unsigned MaxNumSGPRs = alignDown(getTotalNumSGPRs(Features) / WavesPerEU,
                                   getSGPRAllocGranule(Features));
  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(Features);
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}
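
// Continuing the GFX8 example: getMaxNumSGPRs(Features, 8, true) ==
// min(alignDown(800 / 8, 16), 102) == 96.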

unsigned getVGPRAllocGranule(const FeatureBitset &Features) {
  return 4;
}

unsigned getVGPREncodingGranule(const FeatureBitset &Features) {
  return getVGPRAllocGranule(Features);
}

unsigned getTotalNumVGPRs(const FeatureBitset &Features) {
  return 256;
}

unsigned getAddressableNumVGPRs(const FeatureBitset &Features) {
  return getTotalNumVGPRs(Features);
}

unsigned getMinNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(Features) / (WavesPerEU + 1),
                getVGPRAllocGranule(Features)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(Features));
}

unsigned getMaxNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(Features) / WavesPerEU,
                                   getVGPRAllocGranule(Features));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(Features);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const FeatureBitset &Features) {
  IsaInfo::IsaVersion ISA = IsaInfo::getIsaVersion(Features);

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 1;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = ISA.Major;
  Header.amd_machine_version_minor = ISA.Minor;
  Header.amd_machine_version_stepping = ISA.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n.  The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
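
// For example, a string attribute holding "128,256" (the form used by
// attributes such as "amdgpu-flat-work-group-size") parses to {128, 256}.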

unsigned getVmcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth()) - 1;
}

unsigned getWaitcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}
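
// E.g. on a gfx9 target decodeVmcnt(Version, 0x4005) == 21: bits [3:0]
// contribute 5 and bits [15:14] contribute 1 << 4 == 16.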

unsigned decodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

void decodeWaitcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

unsigned encodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

unsigned encodeWaitcnt(const IsaInfo::IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
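
// Worked example (for illustration) on gfx9: encodeWaitcnt(Version, 3, 1, 2)
// starts from the full mask 0xCF7F and packs the three fields, producing
// 0x0213 (vmcnt = 3, expcnt = 1, lgkmcnt = 2, unused bits clear).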

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch (cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_LS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_ES:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1) return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch (Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9(TTMP0) \
  CASE_VI_GFX9(TTMP1) \
  CASE_VI_GFX9(TTMP2) \
  CASE_VI_GFX9(TTMP3) \
  CASE_VI_GFX9(TTMP4) \
  CASE_VI_GFX9(TTMP5) \
  CASE_VI_GFX9(TTMP6) \
  CASE_VI_GFX9(TTMP7) \
  CASE_VI_GFX9(TTMP8) \
  CASE_VI_GFX9(TTMP9) \
  CASE_VI_GFX9(TTMP10) \
  CASE_VI_GFX9(TTMP11) \
  CASE_VI_GFX9(TTMP12) \
  CASE_VI_GFX9(TTMP13) \
  CASE_VI_GFX9(TTMP14) \
  CASE_VI_GFX9(TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1) \
  CASE_VI_GFX9(TTMP2_TTMP3) \
  CASE_VI_GFX9(TTMP4_TTMP5) \
  CASE_VI_GFX9(TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP8_TTMP9) \
  CASE_VI_GFX9(TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP12_TTMP13) \
  CASE_VI_GFX9(TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

// The assert must follow the case label; otherwise it sits between the
// previous case's return and this case's label and can never execute.
#define CASE_CI_VI(node) \
  case node: \
    assert(!isSI(STI)); \
    return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9(node) \
  case node: return isGFX9(STI) ? node##_gfx9 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9

#define CASE_CI_VI(node)   case node##_ci: case node##_vi:   return node;
#define CASE_VI_GFX9(node) case node##_vi: case node##_gfx9: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9
#undef MAP_REG2REG

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
    return 64;
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values.  For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (!EnablePackedInlinableLiterals)
    return false;

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
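
// E.g. 0x40004000 packs half-precision 2.0 into both 16-bit halves and is
// inlinable (when packed inlinable literals are enabled), whereas 0x40003C00
// mixes 2.0 and 1.0 and is not.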

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(),
                                                Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(),
                                                Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (isGCN3Encoding(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return isGCN3Encoding(ST) ?
    isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}
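
// E.g. a 400-byte offset encodes as dword offset 100 on SI/CI and is legal
// (100 fits in 8 bits); under the GCN3 encoding the immediate is a 20-bit
// byte offset, so 400 is used directly and is likewise legal.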

AMDGPUAS getAMDGPUAS(Triple T) {
  AMDGPUAS AS;
  AS.FLAT_ADDRESS = 0;
  AS.PRIVATE_ADDRESS = 5;
  AS.REGION_ADDRESS = 2;
  return AS;
}

AMDGPUAS getAMDGPUAS(const TargetMachine &M) {
  return getAMDGPUAS(M.getTargetTriple());
}

AMDGPUAS getAMDGPUAS(const Module &M) {
  return getAMDGPUAS(Triple(M.getTargetTriple()));
}

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::amdgcn_interp_mov:
  case Intrinsic::amdgcn_interp_p1:
  case Intrinsic::amdgcn_interp_p2:
  case Intrinsic::amdgcn_mbcnt_hi:
  case Intrinsic::amdgcn_mbcnt_lo:
  case Intrinsic::r600_read_tidig_x:
  case Intrinsic::r600_read_tidig_y:
  case Intrinsic::r600_read_tidig_z:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
  case Intrinsic::amdgcn_image_atomic_swap:
  case Intrinsic::amdgcn_image_atomic_add:
  case Intrinsic::amdgcn_image_atomic_sub:
  case Intrinsic::amdgcn_image_atomic_smin:
  case Intrinsic::amdgcn_image_atomic_umin:
  case Intrinsic::amdgcn_image_atomic_smax:
  case Intrinsic::amdgcn_image_atomic_umax:
  case Intrinsic::amdgcn_image_atomic_and:
  case Intrinsic::amdgcn_image_atomic_or:
  case Intrinsic::amdgcn_image_atomic_xor:
  case Intrinsic::amdgcn_image_atomic_inc:
  case Intrinsic::amdgcn_image_atomic_dec:
  case Intrinsic::amdgcn_image_atomic_cmpswap:
  case Intrinsic::amdgcn_buffer_atomic_swap:
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_buffer_atomic_smin:
  case Intrinsic::amdgcn_buffer_atomic_umin:
  case Intrinsic::amdgcn_buffer_atomic_smax:
  case Intrinsic::amdgcn_buffer_atomic_umax:
  case Intrinsic::amdgcn_buffer_atomic_and:
  case Intrinsic::amdgcn_buffer_atomic_or:
  case Intrinsic::amdgcn_buffer_atomic_xor:
  case Intrinsic::amdgcn_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_ps_live:
  case Intrinsic::amdgcn_ds_swizzle:
    return true;
  default:
    return false;
  }
}

} // end namespace AMDGPU
} // end namespace llvm