//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    ST(ST),
    AMDGPUASI(ST.getAMDGPUAS()) {}

// FIXME: This behaves strangely. If, for example, you have 32 loads + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to decide whether loads from
// different address spaces should be clustered together, and how to handle
// loads which might cause bank conflicts.
//
// This hook might be deprecated, so it may not be worth much effort to fix.
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                              int64_t Offset0, int64_t Offset1,
                                              unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have at most 16 loads in a row, and the offsets are within 64
  // bytes of each other, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

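// Return the MIMG variant of Opcode with only the given number of channels
// enabled; channel counts other than 1-3 return Opcode unchanged.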
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}

// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX9 = 4
};

// Wrapper for Tablegen'd function.  enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}
}
}

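// Map a subtarget generation onto the encoding family used to look up MC
// opcodes. SDWA and GFX9-specific opcodes are handled separately in
// pseudoToMCOpcode().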
static SIEncodingFamily subtargetEncodingFamily(const AMDGPUSubtarget &ST) {
  switch (ST.getGeneration()) {
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;

  // FIXME: This should never be called for r600 GPUs.
  case AMDGPUSubtarget::R600:
  case AMDGPUSubtarget::R700:
  case AMDGPUSubtarget::EVERGREEN:
  case AMDGPUSubtarget::NORTHERN_ISLANDS:
    return SIEncodingFamily::SI;
  }

  llvm_unreachable("Unknown subtarget generation!");
}

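// Resolve a pseudo opcode to the MC opcode for this subtarget's encoding
// family. Returns the opcode itself if it is already a native instruction,
// or -1 if the pseudo has no encoding on this subtarget.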
int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA)
    Gen = ST.getGeneration() == AMDGPUSubtarget::GFX9 ? SIEncodingFamily::SDWA9
                                                      : SIEncodingFamily::SDWA;

  if ((get(Opcode).TSFlags & SIInstrFlags::F16_ZFILL) != 0 &&
      ST.getGeneration() >= AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}