//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const SISubtarget &STI, const AMDGPURegisterBankInfo &RBI)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), AMDGPUASI(STI.getAMDGPUAS()) {}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {
  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
        .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);

  if (Size != 64)
    return false;

  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));
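  // S_ADD_U32 above writes its carry-out to SCC, and S_ADDC_U32 below reads
  // SCC as carry-in, so the pair implements a full 64-bit add over the two
  // 32-bit halves.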
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  DebugLoc DL = I.getDebugLoc();

  // FIXME: Select the store instruction based on the address space.
  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(AMDGPU::FLAT_STORE_DWORD))
      .add(I.getOperand(1)) // vaddr
      .add(I.getOperand(0)) // vdata
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, MRI, TRI);

  if (Size == 32) {
    I.setDesc(TII.get(AMDGPU::S_MOV_B32));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  assert(Size == 64);

  DebugLoc DL = I.getDebugLoc();
  unsigned LoReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned HiReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  const APInt &Imm = I.getOperand(1).getCImm()->getValue();

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
      .addReg(LoReg)
      .addImm(AMDGPU::sub0)
      .addReg(HiReg)
      .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target-independent opcodes.
  I.eraseFromParent();
  return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts? Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

static bool isInstrUniform(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}
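// Widen the base SMRD opcode to match the load width. LoadSize is in bits;
// a 32-bit load keeps the base opcode, and wider loads map to the
// DWORDX2/X4/X8/X16 variants of the same addressing form.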
static unsigned getSmrdOpcode(unsigned BaseOpcode, unsigned LoadSize) {
  if (LoadSize == 32)
    return BaseOpcode;

  switch (BaseOpcode) {
  case AMDGPU::S_LOAD_DWORD_IMM:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM_ci;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM_ci;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM_ci;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM_ci;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_SGPR:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_SGPR;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_SGPR;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_SGPR;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_SGPR;
    }
    break;
  }
  llvm_unreachable("Invalid base smrd opcode or size");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

bool AMDGPUInstructionSelector::selectSMRD(MachineInstr &I,
                                           ArrayRef<GEPInfo> AddrInfo) const {
  if (!I.hasOneMemOperand())
    return false;

  if ((*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS)
    return false;

  if (!isInstrUniform(I))
    return false;

  if (hasVgprParts(AddrInfo))
    return false;

  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Opcode;
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);

  if (!AddrInfo.empty() && AddrInfo[0].SgprParts.size() == 1) {
    const GEPInfo &GEPInfo = AddrInfo[0];

    unsigned PtrReg = GEPInfo.SgprParts[0];
    int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(Subtarget, GEPInfo.Imm);
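    // Try the three SMRD offset forms in order of preference: a natively
    // encodable immediate, the Sea Islands-only 32-bit immediate form, and
    // finally an offset materialized into an SGPR with S_MOV_B32.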
    if (AMDGPU::isLegalSMRDImmOffset(Subtarget, GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
          .addReg(PtrReg)
          .addImm(EncodedImm)
          .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (Subtarget.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS &&
        isUInt<32>(EncodedImm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM_ci, LoadSize);
      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
          .addReg(PtrReg)
          .addImm(EncodedImm)
          .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (isUInt<32>(GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_SGPR, LoadSize);
      unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), OffsetReg)
          .addImm(GEPInfo.Imm);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
          .addReg(PtrReg)
          .addReg(OffsetReg)
          .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }
  }

  unsigned PtrReg = I.getOperand(1).getReg();
  Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);
  MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
      .addReg(PtrReg)
      .addImm(0)
      .addImm(0); // glc
  return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  if (selectSMRD(I, AddrInfo)) {
    I.eraseFromParent();
    return true;
  }

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(0))
      .addReg(PtrReg)
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {
  if (!isPreISelGenericOpcode(I.getOpcode()))
    return true;

  switch (I.getOpcode()) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_CONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_LOAD:
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}