//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
             (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
          .addReg(DestReg,
                  RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
        .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
  if (isALUInstr(MI->getOpcode()))
    return true;
  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
    return true;
  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert(SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}

SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        MachineOperand &Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                           OpTable[j][1]));
        Result.push_back(std::make_pair(&MO, Sel.getImm()));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      MachineOperand &Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1]));
      Result.push_back(std::make_pair(&MO, Sel.getImm()));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      MachineOperand &Operand = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal));
      if (Operand.isImm()) {
        Result.push_back(std::make_pair(&MO, Operand.getImm()));
        continue;
      }
      assert(Operand.isGlobal());
    }
    Result.push_back(std::make_pair(&MO, 0));
  }
  return Result;
}

std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    int Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::make_pair(Index, 0U));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to indicate a PS/PV register.
      Result.push_back(std::make_pair(255, 0U));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::make_pair(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}

/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return false;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic terms) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx--;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible swizzle sequences to find one that meets all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in Trans slot can't read gpr at cycle 0 if they also read
/// a const, and can't read a gpr at cycle 1 if they read 2 const.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
                               IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);

    for (const auto &Src : Srcs) {
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const AMDGPUSubtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
         Opcode == AMDGPU::BRANCH_COND_f32;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Remove successive JUMP
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return std::prev(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            ArrayRef<MachineOperand> Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
          .addMBB(TBB)
          .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
        .addMBB(TBB)
        .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Note : we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
  // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool R600InstrInfo::isPredicable(MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI.getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB contains more
    // than one clause, and we are unable to predicate multiple clauses.
    if (MI.getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCyles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCyles,
                                         BranchProbability Probability)
    const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                 ArrayRef<MachineOperand> Pred2) const {
  return false;
}

bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == AMDGPU::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI->getParent();
    int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                 AMDGPU::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::chan);
    if (isRegisterLoad(*MI)) {
      int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::dst);
      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                          Address, OffsetReg);
      }
    } else if (isRegisterStore(*MI)) {
      int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::val);
      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI->getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }

    MBB->erase(MI);
    return true;
  }
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
                      RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
                      MI->getOperand(2).getReg(),
                      RI.getHWRegChan(MI->getOperand(1).getReg()));
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI->getOperand(1).getReg()),   // Address
                       MI->getOperand(3).getReg(),                     // Offset
                       RI.getHWRegChan(MI->getOperand(1).getReg()));   // Channel
    break;
  }
  MI->eraseFromParent();
  return true;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
      MF.getSubtarget().getFrameLowering());

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    AddrReg, ValueReg)
                                .addReg(AMDGPU::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    ValueReg,
                                                    AddrReg)
                                .addReg(AMDGPU::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
                                    DstReg);        // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)       // $write
     .addImm(0)       // $omod
     .addImm(0)       // $dst_rel
     .addImm(0)       // $dst_clamp
     .addReg(Src0Reg) // $src0
     .addImm(0)       // $src0_neg
     .addImm(0)       // $src0_rel
     .addImm(0)       // $src0_abs
     .addImm(-1);     // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)                    // $literal
     .addImm(0);                   // $bank_swizzle

  return MIB;
}

#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert(MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert(MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}

bool R600InstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool R600InstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}