//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

/// \returns true if \p MI carries the TRIG target flag.
bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

/// \returns true if \p MI carries the VECTOR target flag.
bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

/// Emit a physical register copy.
///
/// 128-bit and 64-bit vector registers (including the "vertical" register
/// classes) are copied one 32-bit channel at a time with a MOV per channel;
/// each channel MOV also implicitly defines the full destination register so
/// that liveness of the wide register is tracked correctly.  All other
/// registers are copied with a single MOV, on which the kill flag of
/// \p SrcReg is propagated from \p KillSrc.
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
             AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
            (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
             AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    // Copy channel by channel; the implicit def of DestReg keeps the whole
    // wide register live across the per-channel MOVs.
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    // Propagate the kill flag onto the source operand of the new MOV.
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
/// Splitting is illegal if the instruction uses a physical register that is
/// live across clause boundaries.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

/// \returns true if \p Opcode is a register or immediate MOV.
bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
/// \returns true for place-holder opcodes; only RETURN is currently in this
/// category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

/// No R600 opcode is currently classified as a reduction operation.
bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

/// \returns true if \p Opcode is one of the CUBE variants (pseudo or real,
/// r600 or evergreen encoding).
bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

/// \returns true if the ALU_INST target flag is set for \p Opcode.
bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

/// \returns true if \p Opcode uses an OP1/OP2/OP3 encoding, i.e. carries
/// instruction modifier bits.
bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  // Bitwise | is intentional: each operand is already a masked flag test, so
  // | yields the same truth value as || without branching.
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

/// \returns true if \p Opcode is any LDS instruction flavor (1A, 1A1D, 1A2D).
bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

/// LDS instruction with no dst operand: nothing is written back to a GPR.
bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
}

/// LDS instruction that returns a value through its dst operand.
bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

/// \returns true if \p MI is an ALU instruction, a vector/cube op, or one of
/// a small set of opcodes (PRED_X, interpolation, COPY, DOT_4) that are
/// handled like ALU instructions.
bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
  if (isALUInstr(MI->getOpcode()))
    return true;
  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
    return true;
  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

/// \returns true if \p Opcode may only be scheduled in the trans ALU slot.
/// Never true on Cayman, which has no trans slot.
bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

/// \returns true if \p Opcode may only be scheduled in a vector ALU slot.
bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

/// \returns true if \p Opcode has the IS_EXPORT target flag.
bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

/// \returns true if \p Opcode is a vertex fetch and the subtarget has a
/// dedicated vertex cache.
bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

/// Compute shaders never use the vertex cache (their fetches go through the
/// texture cache path; see usesTextureCache below).
bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
  return MFI->getShaderType() != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

/// \returns true if \p Opcode is a texture fetch, or a vertex fetch on a
/// subtarget without a dedicated vertex cache.
bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
  // In compute shaders, vertex fetches are routed through the texture cache.
  return (MFI->getShaderType() == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

/// \returns true if \p Opcode must terminate its clause (KILLGT and
/// GROUP_BARRIER).
bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

/// \returns true if \p MI reads the address register AR_X.
bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

/// \returns true if \p MI writes the address register AR_X.
bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

/// \returns true if any physical source operand of this ALU instruction
/// belongs to the LDS source register class (i.e. the instruction consumes a
/// value coming from LDS).
bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    // Only physical register uses are of interest here.
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

/// \returns the operand index of source number \p SrcNum (0..2) for
/// \p Opcode, or -1 if that source does not exist.
int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert (SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

/// \returns the operand index of the _sel operand paired with the source
/// operand at index \p SrcIdx, or -1 if \p SrcIdx is not a source operand.
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}

/// Collect the source operands of \p MI as (operand, immediate) pairs:
/// for ALU_CONST sources the immediate is the _sel value, for ALU_LITERAL_X
/// sources it is the literal, and for plain register sources it is 0.
/// For DOT_4 only ALU_CONST sources are collected.
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    // DOT_4 has eight per-channel sources (src0/src1 x XYZW).
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }

    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

/// Translate \p MI's sources into (hw index, channel) pairs for the bank
/// swizzle checks, always returning exactly 3 entries (padded with (-1, 0)).
/// Constant reads (hw index > 127) are counted in \p ConstCount and emitted
/// as dummies; PV/PS forwards are encoded with index 255.
std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
    }
    // NOTE(review): there is no `continue` after the OQAP push above, so an
    // OQAP source also falls through to the PV and const checks below —
    // confirm this is intended.
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell it's a PS/PV reg
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

/// Apply bank swizzle \p Swz to a copy of \p Src (three (index, channel)
/// pairs) and return the permuted copy.  If the first two sources are
/// identical the second is discarded (marked -1) since it imposes no extra
/// read port constraint.
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

/// \returns the read cycle assigned to trans-slot operand \p Op under
/// swizzle \p Swz.  Only the four SCL swizzles are valid for the trans slot.
static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}

/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  // Vector[chan][cycle] records which register index occupies a read port;
  // -1 means the port is still free.
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      // Skip dummies (-1) and PV/PS forwards (255): no read port needed.
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          // NOTE(review): `return false` here yields 0 because the return
          // type is unsigned, i.e. "no instruction of the group is legal".
          return false;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic term) swizzle sequence assuming that all swizzles after
/// Idx can be skipped
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  // Walk back over maxed-out positions (ALU_VEC_210 is the last enumerator).
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  // Everything after the incremented position restarts at the first swizzle.
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  // All positions exhausted: no further candidate exists.
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible Swizzle sequence to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  // Try candidates in lexicographic order; NextPossibleSolution skips every
  // sequence that shares the already-invalid prefix.
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in Trans slot can't read gpr at cycle 0 if they also read
/// a const, and can't read a gpr at cycle 1 if they read 2 const.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

/// Check whether the instruction group \p IG can be scheduled together under
/// the hardware read port limitations, and compute a bank swizzle per
/// instruction into \p ValidSwizzle.  \p PV maps registers satisfied through
/// PV/PS forwarding (see ExtractSrcs).  When \p isLastAluTrans is set, the
/// last instruction goes to the trans slot and all four trans swizzles are
/// tried.
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  //Todo : support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  // NOTE(review): ConstCount is overwritten on each iteration, so only the
  // const count of the last instruction (the trans candidate) is kept —
  // which matches its sole use in the trans-slot check below.
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    // Seed the search with the swizzle already encoded on the instruction.
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
                            IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  // Split off the trans instruction's sources and solve the vector slots
  // against each legal trans swizzle in turn.
  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

/// \returns true if the constants in \p Consts fit in the two constant-pair
/// slots an ALU group may address: at most two distinct "half const" ids
/// (const index with the half bit) across the whole group.
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

/// Check the const/literal read limits for a whole candidate group: at most
/// 4 distinct literals, and the kcache constants must satisfy
/// fitsConstReadLimitations above.
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      std::pair<MachineOperand *, unsigned> Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

/// Create the DFA packetizer driven by the subtarget's instruction
/// itineraries.
DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const AMDGPUSubtarget &>(STI).createDFAPacketizer(II);
}

/// \returns true if \p Opcode writes a predicate (only PRED_X).
static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

/// Scan backwards from \p I for the closest predicate setter in \p MBB;
/// returns nullptr if none is found.
static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
         Opcode == AMDGPU::BRANCH_COND_f32;
}

/// Analyze the branch structure at the end of \p MBB (TargetInstrInfo
/// contract): fills TBB/FBB/Cond and returns false on success, true if the
/// terminators cannot be understood.
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Remove successive JUMP
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      // Cond = { pred opcode, pred value, PRED_SEL register }.
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

/// \returns an iterator to the last CF_ALU / CF_ALU_PUSH_BEFORE in \p MBB,
/// or MBB.end() if there is none.
static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return std::prev(It.base());
  }
  return MBB.end();
}

/// Insert branch code at the end of \p MBB (TargetInstrInfo contract) and
/// return the number of instructions inserted.  Conditional branches also
/// flag the predicate setter with MO_FLAG_PUSH and promote the enclosing
/// CF_ALU clause to CF_ALU_PUSH_BEFORE.
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            ArrayRef<MachineOperand> Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

/// Remove up to two trailing branches from \p MBB, undoing InsertBranch's
/// MO_FLAG_PUSH / CF_ALU_PUSH_BEFORE bookkeeping; returns the number of
/// branches removed.
unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note : we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

/// An instruction is predicated if its first predicate operand names one of
/// the predicate-select registers or the predicate bit.
bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool R600InstrInfo::isPredicable(MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated.  Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI.getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // If the clause start in the middle of MBB then the MBB has more
    // than a single clause, unable to predicate several clauses.
    if (MI.getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

// If-conversion is always considered profitable on this target.
bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCyles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const{
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCyles,
                                         BranchProbability Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

/// Invert the branch condition in place by flipping the zero/non-zero
/// compare opcode (Cond[1]) and the PRED_SEL register (Cond[2]).
/// \returns true (failure) if either component is not recognized.
bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

/// Only PRED_X defines a predicate; the Pred operands are not inspected.
bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                 ArrayRef<MachineOperand> Pred2) const {
  return false;
}

/// Predicate \p MI with \p Pred (Pred[2] carries the PRED_SEL register).
/// CF_ALU clauses only need operand 8 cleared; DOT_4 updates all four
/// per-channel pred_sel operands; other instructions update their first
/// predicate operand.  An implicit PREDICATE_BIT use is appended where the
/// predicate register is rewritten.
bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == AMDGPU::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

/// All instructions are modeled with a fixed latency of 2.
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

/// Indirect addresses map 1:1 to register indices; only channel 0 is valid.
unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

/// Expand post-RA pseudo instructions: register indirect loads/stores and
/// the EXTRACT/INSERT vector-element pseudos are lowered to MOVs or indirect
/// read/write sequences.  \returns true if \p MI was expanded (and erased).
bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {

  switch(MI->getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI->getParent();
    int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                 AMDGPU::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::chan);
    if (isRegisterLoad(*MI)) {
      int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::dst);
      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        // A known base address degenerates to a direct register MOV.
        buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                          Address, OffsetReg);
      }
    } else if (isRegisterStore(*MI)) {
      int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::val);
      unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
      unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI->getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }

    MBB->erase(MI);
    return true;
  }
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
                      RI.getHWRegIndex(MI->getOperand(1).getReg()), //  Address
                      MI->getOperand(2).getReg(),
                      RI.getHWRegChan(MI->getOperand(1).getReg()));
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI->getOperand(1).getReg()),  // Address
                       MI->getOperand(3).getReg(), // Offset
                       RI.getHWRegChan(MI->getOperand(1).getReg())); // Channel
    break;
  }
  MI->eraseFromParent();
  return true;
}

/// Reserve the 128-bit registers (and their per-channel 32-bit T-regs, up to
/// the frame's stack width) that are used for indirect addressing in \p MF.
void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
      MF.getSubtarget().getFrameLowering());

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  // No indirect addressing used: nothing to reserve.
  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

/// Convenience overload: indirect write on address channel 0.
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
1153 unsigned AddrReg; 1154 switch (AddrChan) { 1155 default: llvm_unreachable("Invalid Channel"); 1156 case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break; 1157 case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break; 1158 case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break; 1159 case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break; 1160 } 1161 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg, 1162 AMDGPU::AR_X, OffsetReg); 1163 setImmOperand(MOVA, AMDGPU::OpName::write, 0); 1164 1165 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV, 1166 AddrReg, ValueReg) 1167 .addReg(AMDGPU::AR_X, 1168 RegState::Implicit | RegState::Kill); 1169 setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1); 1170 return Mov; 1171 } 1172 1173 MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB, 1174 MachineBasicBlock::iterator I, 1175 unsigned ValueReg, unsigned Address, 1176 unsigned OffsetReg) const { 1177 return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0); 1178 } 1179 1180 MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB, 1181 MachineBasicBlock::iterator I, 1182 unsigned ValueReg, unsigned Address, 1183 unsigned OffsetReg, 1184 unsigned AddrChan) const { 1185 unsigned AddrReg; 1186 switch (AddrChan) { 1187 default: llvm_unreachable("Invalid Channel"); 1188 case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break; 1189 case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break; 1190 case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break; 1191 case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break; 1192 } 1193 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg, 1194 AMDGPU::AR_X, 1195 OffsetReg); 1196 setImmOperand(MOVA, AMDGPU::OpName::write, 0); 1197 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, 
AMDGPU::MOV, 1198 ValueReg, 1199 AddrReg) 1200 .addReg(AMDGPU::AR_X, 1201 RegState::Implicit | RegState::Kill); 1202 setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1); 1203 1204 return Mov; 1205 } 1206 1207 unsigned R600InstrInfo::getMaxAlusPerClause() const { 1208 return 115; 1209 } 1210 1211 MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB, 1212 MachineBasicBlock::iterator I, 1213 unsigned Opcode, 1214 unsigned DstReg, 1215 unsigned Src0Reg, 1216 unsigned Src1Reg) const { 1217 MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode), 1218 DstReg); // $dst 1219 1220 if (Src1Reg) { 1221 MIB.addImm(0) // $update_exec_mask 1222 .addImm(0); // $update_predicate 1223 } 1224 MIB.addImm(1) // $write 1225 .addImm(0) // $omod 1226 .addImm(0) // $dst_rel 1227 .addImm(0) // $dst_clamp 1228 .addReg(Src0Reg) // $src0 1229 .addImm(0) // $src0_neg 1230 .addImm(0) // $src0_rel 1231 .addImm(0) // $src0_abs 1232 .addImm(-1); // $src0_sel 1233 1234 if (Src1Reg) { 1235 MIB.addReg(Src1Reg) // $src1 1236 .addImm(0) // $src1_neg 1237 .addImm(0) // $src1_rel 1238 .addImm(0) // $src1_abs 1239 .addImm(-1); // $src1_sel 1240 } 1241 1242 //XXX: The r600g finalizer expects this to be 1, once we've moved the 1243 //scheduling to the backend, we can change the default to 0. 
1244 MIB.addImm(1) // $last 1245 .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel 1246 .addImm(0) // $literal 1247 .addImm(0); // $bank_swizzle 1248 1249 return MIB; 1250 } 1251 1252 #define OPERAND_CASE(Label) \ 1253 case Label: { \ 1254 static const unsigned Ops[] = \ 1255 { \ 1256 Label##_X, \ 1257 Label##_Y, \ 1258 Label##_Z, \ 1259 Label##_W \ 1260 }; \ 1261 return Ops[Slot]; \ 1262 } 1263 1264 static unsigned getSlotedOps(unsigned Op, unsigned Slot) { 1265 switch (Op) { 1266 OPERAND_CASE(AMDGPU::OpName::update_exec_mask) 1267 OPERAND_CASE(AMDGPU::OpName::update_pred) 1268 OPERAND_CASE(AMDGPU::OpName::write) 1269 OPERAND_CASE(AMDGPU::OpName::omod) 1270 OPERAND_CASE(AMDGPU::OpName::dst_rel) 1271 OPERAND_CASE(AMDGPU::OpName::clamp) 1272 OPERAND_CASE(AMDGPU::OpName::src0) 1273 OPERAND_CASE(AMDGPU::OpName::src0_neg) 1274 OPERAND_CASE(AMDGPU::OpName::src0_rel) 1275 OPERAND_CASE(AMDGPU::OpName::src0_abs) 1276 OPERAND_CASE(AMDGPU::OpName::src0_sel) 1277 OPERAND_CASE(AMDGPU::OpName::src1) 1278 OPERAND_CASE(AMDGPU::OpName::src1_neg) 1279 OPERAND_CASE(AMDGPU::OpName::src1_rel) 1280 OPERAND_CASE(AMDGPU::OpName::src1_abs) 1281 OPERAND_CASE(AMDGPU::OpName::src1_sel) 1282 OPERAND_CASE(AMDGPU::OpName::pred_sel) 1283 default: 1284 llvm_unreachable("Wrong Operand"); 1285 } 1286 } 1287 1288 #undef OPERAND_CASE 1289 1290 MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( 1291 MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg) 1292 const { 1293 assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented"); 1294 unsigned Opcode; 1295 if (ST.getGeneration() <= AMDGPUSubtarget::R700) 1296 Opcode = AMDGPU::DOT4_r600; 1297 else 1298 Opcode = AMDGPU::DOT4_eg; 1299 MachineBasicBlock::iterator I = MI; 1300 MachineOperand &Src0 = MI->getOperand( 1301 getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot))); 1302 MachineOperand &Src1 = MI->getOperand( 1303 getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot))); 1304 MachineInstr 
*MIB = buildDefaultInstruction( 1305 MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg()); 1306 static const unsigned Operands[14] = { 1307 AMDGPU::OpName::update_exec_mask, 1308 AMDGPU::OpName::update_pred, 1309 AMDGPU::OpName::write, 1310 AMDGPU::OpName::omod, 1311 AMDGPU::OpName::dst_rel, 1312 AMDGPU::OpName::clamp, 1313 AMDGPU::OpName::src0_neg, 1314 AMDGPU::OpName::src0_rel, 1315 AMDGPU::OpName::src0_abs, 1316 AMDGPU::OpName::src0_sel, 1317 AMDGPU::OpName::src1_neg, 1318 AMDGPU::OpName::src1_rel, 1319 AMDGPU::OpName::src1_abs, 1320 AMDGPU::OpName::src1_sel, 1321 }; 1322 1323 MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(), 1324 getSlotedOps(AMDGPU::OpName::pred_sel, Slot))); 1325 MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel)) 1326 .setReg(MO.getReg()); 1327 1328 for (unsigned i = 0; i < 14; i++) { 1329 MachineOperand &MO = MI->getOperand( 1330 getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot))); 1331 assert (MO.isImm()); 1332 setImmOperand(MIB, Operands[i], MO.getImm()); 1333 } 1334 MIB->getOperand(20).setImm(0); 1335 return MIB; 1336 } 1337 1338 MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB, 1339 MachineBasicBlock::iterator I, 1340 unsigned DstReg, 1341 uint64_t Imm) const { 1342 MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg, 1343 AMDGPU::ALU_LITERAL_X); 1344 setImmOperand(MovImm, AMDGPU::OpName::literal, Imm); 1345 return MovImm; 1346 } 1347 1348 MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB, 1349 MachineBasicBlock::iterator I, 1350 unsigned DstReg, unsigned SrcReg) const { 1351 return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg); 1352 } 1353 1354 int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const { 1355 return getOperandIdx(MI.getOpcode(), Op); 1356 } 1357 1358 int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const { 1359 return AMDGPU::getNamedOperandIdx(Opcode, Op); 1360 } 1361 1362 void 
R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op, 1363 int64_t Imm) const { 1364 int Idx = getOperandIdx(*MI, Op); 1365 assert(Idx != -1 && "Operand not supported for this instruction."); 1366 assert(MI->getOperand(Idx).isImm()); 1367 MI->getOperand(Idx).setImm(Imm); 1368 } 1369 1370 //===----------------------------------------------------------------------===// 1371 // Instruction flag getters/setters 1372 //===----------------------------------------------------------------------===// 1373 1374 bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const { 1375 return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0; 1376 } 1377 1378 MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx, 1379 unsigned Flag) const { 1380 unsigned TargetFlags = get(MI->getOpcode()).TSFlags; 1381 int FlagIndex = 0; 1382 if (Flag != 0) { 1383 // If we pass something other than the default value of Flag to this 1384 // function, it means we are want to set a flag on an instruction 1385 // that uses native encoding. 
1386 assert(HAS_NATIVE_OPERANDS(TargetFlags)); 1387 bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3; 1388 switch (Flag) { 1389 case MO_FLAG_CLAMP: 1390 FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp); 1391 break; 1392 case MO_FLAG_MASK: 1393 FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write); 1394 break; 1395 case MO_FLAG_NOT_LAST: 1396 case MO_FLAG_LAST: 1397 FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last); 1398 break; 1399 case MO_FLAG_NEG: 1400 switch (SrcIdx) { 1401 case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break; 1402 case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break; 1403 case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break; 1404 } 1405 break; 1406 1407 case MO_FLAG_ABS: 1408 assert(!IsOP3 && "Cannot set absolute value modifier for OP3 " 1409 "instructions."); 1410 (void)IsOP3; 1411 switch (SrcIdx) { 1412 case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break; 1413 case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break; 1414 } 1415 break; 1416 1417 default: 1418 FlagIndex = -1; 1419 break; 1420 } 1421 assert(FlagIndex != -1 && "Flag not supported for this instruction"); 1422 } else { 1423 FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags); 1424 assert(FlagIndex != 0 && 1425 "Instruction flags not supported for this instruction"); 1426 } 1427 1428 MachineOperand &FlagOp = MI->getOperand(FlagIndex); 1429 assert(FlagOp.isImm()); 1430 return FlagOp; 1431 } 1432 1433 void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand, 1434 unsigned Flag) const { 1435 unsigned TargetFlags = get(MI->getOpcode()).TSFlags; 1436 if (Flag == 0) { 1437 return; 1438 } 1439 if (HAS_NATIVE_OPERANDS(TargetFlags)) { 1440 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag); 1441 if (Flag == MO_FLAG_NOT_LAST) { 1442 clearFlag(MI, Operand, MO_FLAG_LAST); 1443 } else if (Flag == MO_FLAG_MASK) { 1444 clearFlag(MI, Operand, Flag); 1445 } else { 1446 
FlagOp.setImm(1); 1447 } 1448 } else { 1449 MachineOperand &FlagOp = getFlagOp(MI, Operand); 1450 FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand))); 1451 } 1452 } 1453 1454 void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand, 1455 unsigned Flag) const { 1456 unsigned TargetFlags = get(MI->getOpcode()).TSFlags; 1457 if (HAS_NATIVE_OPERANDS(TargetFlags)) { 1458 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag); 1459 FlagOp.setImm(0); 1460 } else { 1461 MachineOperand &FlagOp = getFlagOp(MI); 1462 unsigned InstFlags = FlagOp.getImm(); 1463 InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand)); 1464 FlagOp.setImm(InstFlags); 1465 } 1466 } 1467 1468 bool R600InstrInfo::isRegisterStore(const MachineInstr &MI) const { 1469 return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE; 1470 } 1471 1472 bool R600InstrInfo::isRegisterLoad(const MachineInstr &MI) const { 1473 return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD; 1474 } 1475