//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
             (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    // Vector copies are expanded into one MOV per channel; each MOV also
    // carries an implicit def of the full destination register.
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
          .addReg(DestReg,
                  RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
        .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if an
// opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
  if (isALUInstr(MI->getOpcode()))
    return true;
  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
    return true;
  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert(SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}
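
// Note: the two lookups above compose. For a native encoding, every source
// operand is paired with a "sel" operand that holds the constant-buffer
// selector used when the source register is ALU_CONST. An illustrative
// (hypothetical) caller:
//
//   int SrcIdx = TII->getSrcIdx(Opcode, 0);       // operand index of $src0
//   int SelIdx = TII->getSelIdx(Opcode, SrcIdx);  // operand index of $src0_sel
//
// getSelIdx returns -1 when the operand has no associated selector.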

SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  // Copy the sources; an ArrayRef here would dangle once the temporary
  // returned by getSrcs() is destroyed.
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
      // Without this continue, OQAP would also fall into the const case
      // below and be recorded twice.
      continue;
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to denote a PS/PV operand.
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}
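
// Note on the swizzle enumerators used above and below: for the vector slots,
// the three digits of ALU_VEC_NNN give the order in which src0/src1/src2 are
// presented to the read ports, and the SCL_NNN suffix gives the cycle in
// which each trans-slot operand is read. For example, under
// ALU_VEC_102_SCL_221 the vector slot reads src1, src0, src2 in that order,
// while getTransSwizzle maps trans operands 0/1/2 to cycles 2/2/1.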

/// returns how many MIs (whose inputs are represented by IGSrcs) can be
/// packed in the same Instruction Group while meeting read port limitations
/// given a Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  // Vector[Chan][Cycle] records which register index is read on each read
  // port; -1 means the port is still free.
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return 0;
        }
        // OQAP does not count towards the normal read port restrictions.
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check the Trans ALU.
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic order) swizzle sequence, assuming that all swizzles
/// after Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx--;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible swizzle sequences to find one that meets all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}
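
// The loop above behaves like an odometer over the per-slot swizzles,
// ordered from ALU_VEC_012_SCL_210 (lowest) to ALU_VEC_210 (highest). When
// isLegalUpTo reports a conflict at slot Idx, NextPossibleSolution bumps the
// swizzle at Idx (carrying into earlier slots if Idx already holds the
// highest value) and resets every later slot to the lowest value, so whole
// families of conflicting prefixes are skipped in one step.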

/// Instructions in the Trans slot can't read a gpr at cycle 0 if they also
/// read a const, and can't read a gpr at cycle 1 if they read two consts.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // The TransALU can't read 3 constants.
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}
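
// For example, with ConstCount == 2 the check above only accepts gpr reads
// in cycle 2, so a trans instruction whose operand 0 reads a gpr needs a
// swizzle that maps operand 0 to cycle 2 (ALU_VEC_012_SCL_210,
// ALU_VEC_120_SCL_212 or ALU_VEC_102_SCL_221); the TransSwz candidate loop
// in fitsReadPortLimitations below searches for such a swizzle.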

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0/src1 operands.

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
                               IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    // Copy the sources; an ArrayRef here would dangle once the temporary
    // returned by getSrcs() is destroyed.
    SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      const std::pair<MachineOperand *, int64_t> &Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const AMDGPUSubtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
         Opcode == AMDGPU::BRANCH_COND_f32;
}
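
// AnalyzeBranch below recognizes the two terminator shapes produced by the
// R600 control-flow lowering: an unconditional "JUMP %bb" and a
// "JUMP_COND %bb, $predicate_bit" fed by a preceding PRED_X. A block ending
// in "JUMP_COND; JUMP" is reported as a conditional branch with both a true
// (TBB) and a false (FBB) destination.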

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Remove successive JUMPs.
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a JUMP_COND followed by a JUMP, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return std::prev(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            ArrayRef<MachineOperand> Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
          .addMBB(TBB)
          .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
        .addMBB(TBB)
        .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
  // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool R600InstrInfo::isPredicable(MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in
  // the backend, we will mark KILL* instructions as unpredicable.
  if (MI.getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB contains
    // more than one clause; we can't predicate several clauses at once.
    if (MI.getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm.
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCyles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCyles,
                                         BranchProbability Probability)
    const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}
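
// The Cond vector built by AnalyzeBranch has three entries: Cond[0] and
// Cond[1] are copied from the PRED_X setter (Cond[1] being the predicate
// opcode immediate, e.g. OPCODE_IS_ZERO_INT), and Cond[2] is the pred_sel
// register. Reversing a condition therefore flips Cond[1] to the
// complementary compare and swaps PRED_SEL_ZERO/PRED_SEL_ONE in Cond[2].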

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                 ArrayRef<MachineOperand> Pred2) const {
  return false;
}

bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == AMDGPU::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}
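
// The default case below expands the RegisterLoad/RegisterStore pseudos used
// for the emulated stack: when the offset register is the pseudo-source
// INDIRECT_BASE_ADDR, the access resolves to a plain MOV to or from the
// statically known indirect register; otherwise a MOVA-based indirect read
// or write is emitted.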

bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI->getParent();
    int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                 AMDGPU::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::chan);
    if (isRegisterLoad(*MI)) {
      int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::dst);
      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                          Address, OffsetReg);
      }
    } else if (isRegisterStore(*MI)) {
      int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::val);
      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI->getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }

    MBB->erase(MI);
    return true;
  }
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
                      RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
                      MI->getOperand(2).getReg(),                   // Offset
                      RI.getHWRegChan(MI->getOperand(1).getReg())); // Channel
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI->getOperand(1).getReg()),    // Address
                       MI->getOperand(3).getReg(),                      // Offset
                       RI.getHWRegChan(MI->getOperand(1).getReg()));    // Channel
    break;
  }
  MI->eraseFromParent();
  return true;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
      MF.getSubtarget().getFrameLowering());

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}
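
// An indirect access is emitted as a two-instruction idiom: MOVA_INT_eg
// loads the offset into the address register AR_X (with its own $write bit
// cleared so it produces no gpr result), and the following MOV carries
// dst_rel/src0_rel so the hardware adds AR_X to the register index. The
// implicit kill of AR_X ties the pair together.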

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    AddrReg, ValueReg)
                                .addReg(AMDGPU::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    ValueReg,
                                                    AddrReg)
                                .addReg(AMDGPU::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}
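
// buildDefaultInstruction fills every modifier operand of a native ALU
// encoding with neutral values, so callers only name the registers and then
// override what they need. An illustrative caller (this is how buildMovImm
// further below is written):
//
//   MachineInstr *Mov = buildDefaultInstruction(MBB, I, AMDGPU::MOV, DstReg,
//                                               AMDGPU::ALU_LITERAL_X);
//   setImmOperand(Mov, AMDGPU::OpName::literal, Imm);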

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
                                    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                     // $last
     .addReg(AMDGPU::PRED_SEL_OFF)  // $pred_sel
     .addImm(0)                     // $literal
     .addImm(0);                    // $bank_swizzle

  return MIB;
}

#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert(MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert(MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction that uses
    // native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}
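
// For instructions without native operands, all MO_FLAG_* bits live in a
// single immediate operand, packed in NUM_MO_FLAGS-bit groups per source:
// operand Op's flags occupy bits [NUM_MO_FLAGS * Op, NUM_MO_FLAGS * (Op + 1)).
// addFlag and clearFlag below shift Flag by NUM_MO_FLAGS * Operand to set or
// clear bits within that group.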

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}

bool R600InstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool R600InstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}