1 //===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief R600 Implementation of TargetInstrInfo.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "R600InstrInfo.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUSubtarget.h"
18 #include "AMDGPUTargetMachine.h"
19 #include "R600Defines.h"
20 #include "R600MachineFunctionInfo.h"
21 #include "R600RegisterInfo.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 
26 using namespace llvm;
27 
28 #define GET_INSTRINFO_CTOR_DTOR
29 #include "AMDGPUGenDFAPacketizer.inc"
30 
/// Construct the R600 instruction info for subtarget \p ST.
R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
  : AMDGPUInstrInfo(ST), RI(), ST(ST) {}
33 
34 bool R600InstrInfo::isVector(const MachineInstr &MI) const {
35   return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
36 }
37 
/// Emit a physical-register copy from \p SrcReg to \p DestReg before \p MI.
/// 64- and 128-bit register classes are decomposed into one 32-bit MOV per
/// channel; everything else becomes a single MOV.
void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, bool KillSrc) const {
  // Number of 32-bit channels to copy (0 means a plain scalar copy).
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
      AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
            AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
            (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
             AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      // Each per-channel MOV also implicitly defines the full DestReg so the
      // whole-register copy stays visible to liveness analysis.
      // NOTE(review): KillSrc is not propagated on this path — confirm that
      // dropping the kill flag for vector copies is intended.
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    // Propagate the kill flag onto the MOV's source operand.
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}
71 
72 /// \returns true if \p MBBI can be moved into a new basic.
73 bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
74                                        MachineBasicBlock::iterator MBBI) const {
75   for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
76                                         E = MBBI->operands_end(); I != E; ++I) {
77     if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
78         I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
79       return false;
80   }
81   return true;
82 }
83 
84 bool R600InstrInfo::isMov(unsigned Opcode) const {
85   switch(Opcode) {
86   default:
87     return false;
88   case AMDGPU::MOV:
89   case AMDGPU::MOV_IMM_F32:
90   case AMDGPU::MOV_IMM_I32:
91     return true;
92   }
93 }
94 
95 bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
96   return false;
97 }
98 
99 bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
100   switch(Opcode) {
101     default: return false;
102     case AMDGPU::CUBE_r600_pseudo:
103     case AMDGPU::CUBE_r600_real:
104     case AMDGPU::CUBE_eg_pseudo:
105     case AMDGPU::CUBE_eg_real:
106       return true;
107   }
108 }
109 
110 bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
111   unsigned TargetFlags = get(Opcode).TSFlags;
112 
113   return (TargetFlags & R600_InstFlag::ALU_INST);
114 }
115 
116 bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
117   unsigned TargetFlags = get(Opcode).TSFlags;
118 
119   return ((TargetFlags & R600_InstFlag::OP1) |
120           (TargetFlags & R600_InstFlag::OP2) |
121           (TargetFlags & R600_InstFlag::OP3));
122 }
123 
124 bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
125   unsigned TargetFlags = get(Opcode).TSFlags;
126 
127   return ((TargetFlags & R600_InstFlag::LDS_1A) |
128           (TargetFlags & R600_InstFlag::LDS_1A1D) |
129           (TargetFlags & R600_InstFlag::LDS_1A2D));
130 }
131 
132 bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
133   return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
134 }
135 
136 bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
137   if (isALUInstr(MI.getOpcode()))
138     return true;
139   if (isVector(MI) || isCubeOp(MI.getOpcode()))
140     return true;
141   switch (MI.getOpcode()) {
142   case AMDGPU::PRED_X:
143   case AMDGPU::INTERP_PAIR_XY:
144   case AMDGPU::INTERP_PAIR_ZW:
145   case AMDGPU::INTERP_VEC_LOAD:
146   case AMDGPU::COPY:
147   case AMDGPU::DOT_4:
148     return true;
149   default:
150     return false;
151   }
152 }
153 
154 bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
155   if (ST.hasCaymanISA())
156     return false;
157   return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
158 }
159 
160 bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
161   return isTransOnly(MI.getOpcode());
162 }
163 
164 bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
165   return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
166 }
167 
168 bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
169   return isVectorOnly(MI.getOpcode());
170 }
171 
172 bool R600InstrInfo::isExport(unsigned Opcode) const {
173   return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
174 }
175 
176 bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
177   return ST.hasVertexCache() && IS_VTX(get(Opcode));
178 }
179 
180 bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
181   const MachineFunction *MF = MI.getParent()->getParent();
182   return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
183          usesVertexCache(MI.getOpcode());
184 }
185 
186 bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
187   return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
188 }
189 
/// \returns true if \p MI reads through the texture cache. In compute
/// shaders, vertex-cache opcodes are also routed through the texture cache,
/// so they count as texture-cache users here.
bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
          usesVertexCache(MI.getOpcode())) ||
         usesTextureCache(MI.getOpcode());
}
196 
197 bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
198   switch (Opcode) {
199   case AMDGPU::KILLGT:
200   case AMDGPU::GROUP_BARRIER:
201     return true;
202   default:
203     return false;
204   }
205 }
206 
207 bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
208   return MI.findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
209 }
210 
211 bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
212   return MI.findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
213 }
214 
215 bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
216   if (!isALUInstr(MI.getOpcode())) {
217     return false;
218   }
219   for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
220                                         E = MI.operands_end();
221        I != E; ++I) {
222     if (!I->isReg() || !I->isUse() ||
223         TargetRegisterInfo::isVirtualRegister(I->getReg()))
224       continue;
225 
226     if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
227       return true;
228   }
229   return false;
230 }
231 
232 int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
233   static const unsigned SrcSelTable[][2] = {
234     {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
235     {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
236     {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
237     {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
238     {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
239     {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
240     {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
241     {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
242     {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
243     {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
244     {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
245   };
246 
247   for (const auto &Row : SrcSelTable) {
248     if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
249       return getOperandIdx(Opcode, Row[1]);
250     }
251   }
252   return -1;
253 }
254 
/// Collect the source operands of \p MI, paired with the value needed to
/// interpret them: the constant-buffer select for ALU_CONST sources, the
/// literal value for ALU_LITERAL_X sources, and 0 otherwise.
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr &MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    // DOT_4 has per-channel sources: src0/src1 for each of X, Y, Z, W.
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    // Note: on this path only ALU_CONST sources are reported; other register
    // sources are deliberately skipped.
    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        MachineOperand &Sel =
            MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
        Result.push_back(std::make_pair(&MO, Sel.getImm()));
        continue;
      }

    }
    return Result;
  }

  // Generic ALU path: up to three scalar sources.
  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    // Stop at the first source the opcode does not have.
    int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI.getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      // Constant-buffer read: pair with its select value.
      MachineOperand &Sel =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
      Result.push_back(std::make_pair(&MO, Sel.getImm()));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      // Literal operand: pair with the immediate when present; a
      // global-address literal falls through and is reported with 0.
      MachineOperand &Operand =
          MI.getOperand(getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));
      if (Operand.isImm()) {
        Result.push_back(std::make_pair(&MO, Operand.getImm()));
        continue;
      }
      assert(Operand.isGlobal());
    }
    Result.push_back(std::make_pair(&MO, 0));
  }
  return Result;
}
317 
/// Translate the sources of \p MI into (register index, channel) pairs for
/// read-port checking, always returning at least 3 entries (padded with
/// (-1, 0) dummies). PS/PV forwarded values are encoded as index 255, and
/// constants (index > 127) are counted in \p ConstCount and replaced by a
/// dummy.
std::vector<std::pair<int, unsigned>>
R600InstrInfo::ExtractSrcs(MachineInstr &MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (const auto &Src : getSrcs(MI)) {
    ++i;
    unsigned Reg = Src.first->getReg();
    int Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      // NOTE(review): there is no `continue` here, so an OQAP source also
      // falls through to the PV/const/channel checks below and may be pushed
      // a second time — confirm this fall-through is intended.
      Result.push_back(std::make_pair(Index, 0U));
    }
    if (PV.find(Reg) != PV.end()) {
      // Index 255 marks a value forwarded through PS/PV.
      Result.push_back(std::make_pair(255, 0U));
      continue;
    }
    if (Index > 127) {
      // Constant read: tally it and emit a dummy placeholder.
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::make_pair(Index, Chan));
  }
  // Pad to the 3 sources an ALU instruction may have.
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}
350 
/// Apply bank swizzle \p Swz to the (index, channel) source list \p Src
/// (taken by value; the permuted copy is returned). When the first two
/// sources are identical, the duplicate is dropped (marked -1) since it
/// presumably needs only one read port — TODO confirm.
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    // Identity permutation.
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    // Rotate left: (0,1,2) -> (1,2,0).
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    // Rotate right: (0,1,2) -> (2,0,1).
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    // Reverse: (0,1,2) -> (2,1,0).
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}
379 
380 static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
381   switch (Swz) {
382   case R600InstrInfo::ALU_VEC_012_SCL_210: {
383     unsigned Cycles[3] = { 2, 1, 0};
384     return Cycles[Op];
385   }
386   case R600InstrInfo::ALU_VEC_021_SCL_122: {
387     unsigned Cycles[3] = { 1, 2, 2};
388     return Cycles[Op];
389   }
390   case R600InstrInfo::ALU_VEC_120_SCL_212: {
391     unsigned Cycles[3] = { 2, 1, 2};
392     return Cycles[Op];
393   }
394   case R600InstrInfo::ALU_VEC_102_SCL_221: {
395     unsigned Cycles[3] = { 2, 2, 1};
396     return Cycles[Op];
397   }
398   default:
399     llvm_unreachable("Wrong Swizzle for Trans Slot");
400   }
401 }
402 
/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned  R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  // Vector[chan][cycle] records which register index occupies each of the
  // 4 channels x 3 read cycles; -1 means the port is still free.
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      // Skip dummy (-1) and PS/PV-forwarded (255) sources.
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
            // The value from output queue A (denoted by register OQAP) can
            // only be fetched during the first cycle.
            // NOTE(review): `return false` in an unsigned function yields 0,
            // i.e. "zero instructions are legal" — confirm intended.
            return false;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      // Claim the port, or bail at instruction i on a conflicting claim.
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      // A trans conflict invalidates only the last vector instruction.
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}
451 
/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic term) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
/// \returns false when SwzCandidate was already the last sequence.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  // Walk back past every position already at the maximum swizzle value
  // (ALU_VEC_210), like carrying in an odometer.
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  // Reset everything after the carry position to the minimum value.
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  // All positions were maximal: the enumeration is exhausted.
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}
472 
473 /// Enumerate all possible Swizzle sequence to find one that can meet all
474 /// read port requirements.
475 bool R600InstrInfo::FindSwizzleForVectorSlot(
476     const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
477     std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
478     const std::vector<std::pair<int, unsigned> > &TransSrcs,
479     R600InstrInfo::BankSwizzle TransSwz) const {
480   unsigned ValidUpTo = 0;
481   do {
482     ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
483     if (ValidUpTo == IGSrcs.size())
484       return true;
485   } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
486   return false;
487 }
488 
489 /// Instructions in Trans slot can't read gpr at cycle 0 if they also read
490 /// a const, and can't read a gpr at cycle 1 if they read 2 const.
491 static bool
492 isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
493                   const std::vector<std::pair<int, unsigned> > &TransOps,
494                   unsigned ConstCount) {
495   // TransALU can't read 3 constants
496   if (ConstCount > 2)
497     return false;
498   for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
499     const std::pair<int, unsigned> &Src = TransOps[i];
500     unsigned Cycle = getTransSwizzle(TransSwz, i);
501     if (Src.first < 0)
502       continue;
503     if (ConstCount > 0 && Cycle == 0)
504       return false;
505     if (ConstCount > 1 && Cycle == 1)
506       return false;
507   }
508   return true;
509 }
510 
/// Check whether the instruction group \p IG can satisfy the hardware read
/// port limitations, and compute a working bank-swizzle assignment into
/// \p ValidSwizzle. When \p isLastAluTrans, the last instruction is placed
/// on the trans slot and each of the four SCL swizzles is tried for it.
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  //Todo : support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  // Seed the candidate sequence with each instruction's current swizzle.
  // Note: ConstCount is overwritten per instruction; only the last
  // instruction's count is used below (the trans candidate).
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
        AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
        IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  // Peel the last instruction off: it goes to the trans slot.
  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  // Try each trans swizzle that is compatible with the const usage, and see
  // if the vector slots can be scheduled around it.
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
        TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}
558 
559 
560 bool
561 R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
562     const {
563   assert (Consts.size() <= 12 && "Too many operands in instructions group");
564   unsigned Pair1 = 0, Pair2 = 0;
565   for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
566     unsigned ReadConstHalf = Consts[i] & 2;
567     unsigned ReadConstIndex = Consts[i] & (~3);
568     unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
569     if (!Pair1) {
570       Pair1 = ReadHalfConst;
571       continue;
572     }
573     if (Pair1 == ReadHalfConst)
574       continue;
575     if (!Pair2) {
576       Pair2 = ReadHalfConst;
577       continue;
578     }
579     if (Pair2 != ReadHalfConst)
580       return false;
581   }
582   return true;
583 }
584 
585 bool
586 R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
587     const {
588   std::vector<unsigned> Consts;
589   SmallSet<int64_t, 4> Literals;
590   for (unsigned i = 0, n = MIs.size(); i < n; i++) {
591     MachineInstr &MI = *MIs[i];
592     if (!isALUInstr(MI.getOpcode()))
593       continue;
594 
595     for (const auto &Src : getSrcs(MI)) {
596       if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
597         Literals.insert(Src.second);
598       if (Literals.size() > 4)
599         return false;
600       if (Src.first->getReg() == AMDGPU::ALU_CONST)
601         Consts.push_back(Src.second);
602       if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
603           AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
604         unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
605         unsigned Chan = RI.getHWRegChan(Src.first->getReg());
606         Consts.push_back((Index << 2) | Chan);
607       }
608     }
609   }
610   return fitsConstReadLimitations(Consts);
611 }
612 
613 DFAPacketizer *
614 R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
615   const InstrItineraryData *II = STI.getInstrItineraryData();
616   return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
617 }
618 
619 static bool
620 isPredicateSetter(unsigned Opcode) {
621   switch (Opcode) {
622   case AMDGPU::PRED_X:
623     return true;
624   default:
625     return false;
626   }
627 }
628 
629 static MachineInstr *
630 findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
631                              MachineBasicBlock::iterator I) {
632   while (I != MBB.begin()) {
633     --I;
634     MachineInstr &MI = *I;
635     if (isPredicateSetter(MI.getOpcode()))
636       return &MI;
637   }
638 
639   return nullptr;
640 }
641 
642 static
643 bool isJump(unsigned Opcode) {
644   return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
645 }
646 
647 static bool isBranch(unsigned Opcode) {
648   return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
649       Opcode == AMDGPU::BRANCH_COND_f32;
650 }
651 
/// Analyze the terminators of \p MBB, filling in the true/false destinations
/// and the branch condition. \returns true when the block's control flow
/// cannot be understood (per the TargetInstrInfo::analyzeBranch contract).
bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Remove successive JUMP
  // (walk back past redundant unconditional jumps, deleting them only when
  // modification is allowed).
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
      MachineBasicBlock::iterator PriorI = std::prev(I);
      if (AllowModify)
        I->removeFromParent();
      I = PriorI;
  }
  MachineInstr &LastInst = *I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst.getOpcode();
  if (I == MBB.begin() ||
          !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      // Unconditional branch.
      TBB = LastInst.getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      // Conditional branch: recover the condition from the PRED_X that
      // precedes the jump.
      auto predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr &SecondLastInst = *I;
  unsigned SecondLastOpc = SecondLastInst.getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    auto predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst.getOperand(0).getMBB();
    FBB = LastInst.getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
723 
/// Find the last CF_ALU / CF_ALU_PUSH_BEFORE instruction in \p MBB by
/// scanning backwards. \returns MBB.end() when none exists.
static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
      It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      // std::prev(It.base()) converts the reverse iterator back to a forward
      // iterator pointing at the found instruction.
      return std::prev(It.base());
  }
  return MBB.end();
}
734 
/// Insert branch instruction(s) at the end of \p MBB per the
/// TargetInstrInfo::InsertBranch contract. \returns the number of branch
/// instructions inserted (1 or 2).
unsigned R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *TBB,
                                     MachineBasicBlock *FBB,
                                     ArrayRef<MachineOperand> Cond,
                                     const DebugLoc &DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) {
      // Unconditional branch.
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      // Conditional branch: re-arm the preceding PRED_X with the push flag
      // and the requested compare opcode.
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(*PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      // The enclosing ALU clause must push the stack before the branch.
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    // Conditional branch to TBB plus unconditional fallback to FBB.
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(*PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    // As above, convert the clause to the stack-pushing variant.
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}
779 
/// Remove up to two branch instructions from the end of \p MBB.
/// \returns the number of instructions examined/removed slots (0, 1 or 2),
/// per the TargetInstrInfo::RemoveBranch contract.
unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note : we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  // First (innermost) trailing branch.
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    // Undo the push flag set by InsertBranch and revert the clause to the
    // non-pushing CF_ALU variant.
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  // Second trailing branch (mirrors the block above).
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}
837 
838 bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
839   int idx = MI.findFirstPredOperandIdx();
840   if (idx < 0)
841     return false;
842 
843   unsigned Reg = MI.getOperand(idx).getReg();
844   switch (Reg) {
845   default: return false;
846   case AMDGPU::PRED_SEL_ONE:
847   case AMDGPU::PRED_SEL_ZERO:
848   case AMDGPU::PREDICATE_BIT:
849     return true;
850   }
851 }
852 
853 bool R600InstrInfo::isPredicable(MachineInstr &MI) const {
854   // XXX: KILL* instructions can be predicated, but they must be the last
855   // instruction in a clause, so this means any instructions after them cannot
856   // be predicated.  Until we have proper support for instruction clauses in the
857   // backend, we will mark KILL* instructions as unpredicable.
858 
859   if (MI.getOpcode() == AMDGPU::KILLGT) {
860     return false;
861   } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
862     // If the clause start in the middle of MBB then the MBB has more
863     // than a single clause, unable to predicate several clauses.
864     if (MI.getParent()->begin() != MachineBasicBlock::iterator(MI))
865       return false;
866     // TODO: We don't support KC merging atm
867     return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
868   } else if (isVector(MI)) {
869     return false;
870   } else {
871     return AMDGPUInstrInfo::isPredicable(MI);
872   }
873 }
874 
875 
/// If-conversion of a single block is always considered profitable on R600.
bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCyles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const{
  return true;
}
883 
/// If-conversion of a diamond (both true and false blocks) is always
/// considered profitable on R600.
bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}
894 
// Duplicating a block to enable if-conversion is always accepted, regardless
// of its size or branch probability.
bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCyles,
                                         BranchProbability Probability)
                                         const {
  return true;
}
902 
// Never undo predication: once instructions are predicated we keep them so.
bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}
908 
909 
910 bool
911 R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
912   MachineOperand &MO = Cond[1];
913   switch (MO.getImm()) {
914   case OPCODE_IS_ZERO_INT:
915     MO.setImm(OPCODE_IS_NOT_ZERO_INT);
916     break;
917   case OPCODE_IS_NOT_ZERO_INT:
918     MO.setImm(OPCODE_IS_ZERO_INT);
919     break;
920   case OPCODE_IS_ZERO:
921     MO.setImm(OPCODE_IS_NOT_ZERO);
922     break;
923   case OPCODE_IS_NOT_ZERO:
924     MO.setImm(OPCODE_IS_ZERO);
925     break;
926   default:
927     return true;
928   }
929 
930   MachineOperand &MO2 = Cond[2];
931   switch (MO2.getReg()) {
932   case AMDGPU::PRED_SEL_ZERO:
933     MO2.setReg(AMDGPU::PRED_SEL_ONE);
934     break;
935   case AMDGPU::PRED_SEL_ONE:
936     MO2.setReg(AMDGPU::PRED_SEL_ZERO);
937     break;
938   default:
939     return true;
940   }
941   return false;
942 }
943 
// An instruction defines a predicate exactly when its opcode is a predicate
// setter; the Pred output vector is left untouched.
bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}
948 
949 
// Predicate MI using the condition in Pred.  Pred[2] carries the
// predicate-select register (see ReverseBranchCondition, which flips the
// PRED_SEL_* register stored in the same slot).  Returns true if the
// instruction was successfully predicated.
bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // NOTE(review): operand 8 is presumably the clause field that must be
    // cleared when predicating a CF_ALU clause -- confirm against the
    // CF_ALU instruction definition.
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    // DOT_4 has one pred_sel operand per vector slot; set all four and add
    // an implicit use of PREDICATE_BIT.
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  // Generic case: rewrite the first predicate operand, if one exists.
  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}
983 
// Predication has a fixed modeled cost of 2 cycles on R600.
unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}
987 
// Every instruction is modeled with a flat latency of 2 cycles; the
// itinerary data and the specific instruction are ignored.
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}
995 
// Map a (register index, channel) pair to an indirect address.  Only
// channel 0 is supported here; the address is simply the register index.
unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                   unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}
1001 
1002 bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1003   switch (MI.getOpcode()) {
1004   default: {
1005     MachineBasicBlock *MBB = MI.getParent();
1006     int OffsetOpIdx =
1007         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::addr);
1008     // addr is a custom operand with multiple MI operands, and only the
1009     // first MI operand is given a name.
1010     int RegOpIdx = OffsetOpIdx + 1;
1011     int ChanOpIdx =
1012         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::chan);
1013     if (isRegisterLoad(MI)) {
1014       int DstOpIdx =
1015           AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
1016       unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
1017       unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
1018       unsigned Address = calculateIndirectAddress(RegIndex, Channel);
1019       unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
1020       if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
1021         buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
1022                       getIndirectAddrRegClass()->getRegister(Address));
1023       } else {
1024         buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
1025                           OffsetReg);
1026       }
1027     } else if (isRegisterStore(MI)) {
1028       int ValOpIdx =
1029           AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::val);
1030       unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
1031       unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
1032       unsigned Address = calculateIndirectAddress(RegIndex, Channel);
1033       unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
1034       if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
1035         buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
1036                       MI.getOperand(ValOpIdx).getReg());
1037       } else {
1038         buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
1039                            calculateIndirectAddress(RegIndex, Channel),
1040                            OffsetReg);
1041       }
1042     } else {
1043       return false;
1044     }
1045 
1046     MBB->erase(MI);
1047     return true;
1048   }
1049   case AMDGPU::R600_EXTRACT_ELT_V2:
1050   case AMDGPU::R600_EXTRACT_ELT_V4:
1051     buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
1052                       RI.getHWRegIndex(MI.getOperand(1).getReg()), //  Address
1053                       MI.getOperand(2).getReg(),
1054                       RI.getHWRegChan(MI.getOperand(1).getReg()));
1055     break;
1056   case AMDGPU::R600_INSERT_ELT_V2:
1057   case AMDGPU::R600_INSERT_ELT_V4:
1058     buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
1059                        RI.getHWRegIndex(MI.getOperand(1).getReg()),   // Address
1060                        MI.getOperand(3).getReg(),                     // Offset
1061                        RI.getHWRegChan(MI.getOperand(1).getReg()));   // Channel
1062     break;
1063   }
1064   MI.eraseFromParent();
1065   return true;
1066 }
1067 
1068 void  R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
1069                                              const MachineFunction &MF) const {
1070   const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
1071   const R600FrameLowering *TFL = ST.getFrameLowering();
1072 
1073   unsigned StackWidth = TFL->getStackWidth(MF);
1074   int End = getIndirectIndexEnd(MF);
1075 
1076   if (End == -1)
1077     return;
1078 
1079   for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
1080     unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
1081     Reserved.set(SuperReg);
1082     for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
1083       unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
1084       Reserved.set(Reg);
1085     }
1086   }
1087 }
1088 
// Indirectly-addressable registers live in the X-channel temporary class.
const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}
1092 
// Convenience overload: indirect write on address channel 0 (X).
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}
1099 
// Emit an indirect write of ValueReg to the address register selected by
// Address/AddrChan, offset by OffsetReg: first a MOVA to load OffsetReg into
// AR_X, then a MOV with dst_rel set so the destination is relative to AR_X.
// Returns the builder for the final MOV.
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  // Pick the address register class matching the requested channel.
  switch (AddrChan) {
    default: llvm_unreachable("Invalid Channel");
    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  // MOVA only loads the address register; suppress its register write.
  setImmOperand(*MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  // Destination is relative to AR_X.
  setImmOperand(*Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}
1124 
// Convenience overload: indirect read on address channel 0 (X).
MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}
1131 
// Emit an indirect read into ValueReg from the address register selected by
// Address/AddrChan, offset by OffsetReg: a MOVA loads OffsetReg into AR_X,
// then a MOV with src0_rel set reads relative to AR_X.  Returns the builder
// for the final MOV.
MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  // Pick the address register class matching the requested channel.
  switch (AddrChan) {
    default: llvm_unreachable("Invalid Channel");
    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                                       AMDGPU::AR_X,
                                                       OffsetReg);
  // MOVA only loads the address register; suppress its register write.
  setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  // Source is relative to AR_X.
  setImmOperand(*Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}
1158 
1159 int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
1160   const MachineRegisterInfo &MRI = MF.getRegInfo();
1161   const MachineFrameInfo &MFI = MF.getFrameInfo();
1162   int Offset = -1;
1163 
1164   if (MFI.getNumObjects() == 0) {
1165     return -1;
1166   }
1167 
1168   if (MRI.livein_empty()) {
1169     return 0;
1170   }
1171 
1172   const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
1173   for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
1174                                             LE = MRI.livein_end();
1175                                             LI != LE; ++LI) {
1176     unsigned Reg = LI->first;
1177     if (TargetRegisterInfo::isVirtualRegister(Reg) ||
1178         !IndirectRC->contains(Reg))
1179       continue;
1180 
1181     unsigned RegIndex;
1182     unsigned RegEnd;
1183     for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
1184                                                           ++RegIndex) {
1185       if (IndirectRC->getRegister(RegIndex) == Reg)
1186         break;
1187     }
1188     Offset = std::max(Offset, (int)RegIndex);
1189   }
1190 
1191   return Offset + 1;
1192 }
1193 
// Compute the last register index usable for indirect addressing, or -1 when
// indirect addressing is unavailable (variable-sized or no stack objects).
int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  if (MFI.hasVarSizedObjects()) {
    return -1;
  }

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned IgnoredFrameReg;
  // NOTE(review): frame index -1 appears to query the total frame extent
  // from the frame lowering -- confirm against R600FrameLowering.
  Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);

  return getIndirectIndexBegin(MF) + Offset;
}
1215 
// Maximum number of ALU instructions allowed in a single clause.
unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}
1219 
// Build an ALU instruction with all modifier operands at their neutral
// defaults ($write = 1, everything else 0/off, source selects -1).  A
// non-zero Src1Reg produces the two-source operand layout, which adds the
// leading $update_exec_mask/$update_predicate operands and the src1 group.
// The operand order here must match the instruction definitions exactly.
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);       // $src0_sel
1260 
// Expands to a switch case mapping a generic operand name to its per-slot
// (X/Y/Z/W) variant, indexed by the enclosing function's Slot argument.
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

// Return the slot-specific (X/Y/Z/W) variant of the generic DOT_4 operand
// name \p Op for vector slot \p Slot (0-3).
static unsigned getSlotedOps(unsigned  Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE
1298 
// Materialize one scalar slot (X/Y/Z/W) of a DOT_4 vector instruction as a
// standalone DOT4 instruction, copying that slot's sources, modifier
// operands, and predicate select from the vector instruction.
MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  // Pre-Evergreen chips use the r600 encoding of DOT4.
  if (ST.getGeneration() <= R600Subtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  // Immediate modifier operands copied slot-for-slot from the DOT_4.
  static const unsigned  Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  // Propagate this slot's predicate select to the new instruction.
  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(*MIB, Operands[i], MO.getImm());
  }
  // NOTE(review): operand 20 of the generated DOT4 is cleared here; it is
  // presumably the $last flag set by buildDefaultInstruction -- confirm
  // against the DOT4 operand layout.
  MIB->getOperand(20).setImm(0);
  return MIB;
}
1346 
1347 MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
1348                                          MachineBasicBlock::iterator I,
1349                                          unsigned DstReg,
1350                                          uint64_t Imm) const {
1351   MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
1352                                                   AMDGPU::ALU_LITERAL_X);
1353   setImmOperand(*MovImm, AMDGPU::OpName::literal, Imm);
1354   return MovImm;
1355 }
1356 
// Emit a register-to-register MOV with default modifiers.
MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}
1362 
// Convenience overload: look up a named operand index on an instruction.
int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}
1366 
// Return the operand index of named operand \p Op for \p Opcode, or -1 if
// the opcode has no such operand.
int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}
1370 
1371 void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
1372                                   int64_t Imm) const {
1373   int Idx = getOperandIdx(MI, Op);
1374   assert(Idx != -1 && "Operand not supported for this instruction.");
1375   assert(MI.getOperand(Idx).isImm());
1376   MI.getOperand(Idx).setImm(Imm);
1377 }
1378 
1379 //===----------------------------------------------------------------------===//
1380 // Instruction flag getters/setters
1381 //===----------------------------------------------------------------------===//
1382 
// Return the operand of MI that stores the given flag.  For instructions
// with native operand encoding, a non-zero \p Flag selects the dedicated
// modifier operand (clamp/write/last/neg/abs) for source index \p SrcIdx.
// With Flag == 0, the packed per-instruction flag operand (located via
// GET_FLAG_OPERAND_IDX on TSFlags) is returned instead.
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      // Negation modifier exists per source operand.
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_neg);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_neg);
        break;
      case 2:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src2_neg);
        break;
      }
      break;

    case MO_FLAG_ABS:
      // OP3 instructions have no absolute-value modifier operands.
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_abs);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_abs);
        break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
      // Non-native encoding: all flags live in one packed immediate operand.
      FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
      assert(FlagIndex != 0 &&
         "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI.getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}
1447 
// Set the given flag on operand \p Operand of MI.  For native encodings the
// dedicated modifier operand is set to 1 (except MO_FLAG_NOT_LAST /
// MO_FLAG_MASK, which are expressed by *clearing* the corresponding
// last/write operand); otherwise the flag bit is OR'ed into the packed
// flag operand.
void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
      // Packed encoding: each operand gets NUM_MO_FLAGS bits in the operand.
      MachineOperand &FlagOp = getFlagOp(MI, Operand);
      FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}
1468 
// Clear the given flag on operand \p Operand of MI: zero the dedicated
// modifier operand for native encodings, or mask the flag bit out of the
// packed flag operand otherwise.
void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}
1482 
// True if MI is a RegisterStore pseudo (per its TSFlags).
bool R600InstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}
1486 
// True if MI is a RegisterLoad pseudo (per its TSFlags).
bool R600InstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}
1490