//===-- R600ISelLowering.cpp - R600 DAG Lowering Implementation -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for R600
//
//===----------------------------------------------------------------------===//

#include "R600ISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600FrameLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

R600TargetLowering::R600TargetLowering(const TargetMachine &TM,
                                       const R600Subtarget &STI)
    : AMDGPUTargetLowering(TM, STI), Gen(STI.getGeneration()) {
  addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
  addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
  addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);

  computeRegisterProperties(STI.getRegisterInfo());

  // Legalize loads and stores to the private address space.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);

  // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
  // spaces, so it is custom lowered to handle those where it isn't.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Custom);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Custom);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Custom);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Custom);
  }

  // Workaround for LegalizeDAG asserting on expansion of i1 vector loads.
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, MVT::v2i1, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, MVT::v2i1, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, MVT::v2i1, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i1, Expand);

  setOperationAction(ISD::STORE, MVT::i8, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setTruncStoreAction(MVT::i32, MVT::i8, Custom);
  setTruncStoreAction(MVT::i32, MVT::i16, Custom);
  // We need to include these since trunc STORES to PRIVATE need
  // special handling to accommodate RMW.
  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Custom);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Custom);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Custom);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Custom);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Custom);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Custom);

  // Workaround for LegalizeDAG asserting on expansion of i1 vector stores.
  setTruncStoreAction(MVT::v2i32, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i1, Expand);

  // Set condition code actions
  setCondCodeAction(ISD::SETO, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
  setCondCodeAction(ISD::SETLT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULE, MVT::f32, Expand);

  setCondCodeAction(ISD::SETLE, MVT::i32, Expand);
  setCondCodeAction(ISD::SETLT, MVT::i32, Expand);
  setCondCodeAction(ISD::SETULE, MVT::i32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::i32, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FSIN, MVT::f32, Custom);

  setOperationAction(ISD::SETCC, MVT::v4i32, Expand);
  setOperationAction(ISD::SETCC, MVT::v2i32, Expand);

  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::FSUB, MVT::f32, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i32, Expand);

  // ADD, SUB overflow.
  // TODO: turn these into Legal?
  if (Subtarget->hasCARRY())
    setOperationAction(ISD::UADDO, MVT::i32, Custom);

  if (Subtarget->hasBORROW())
    setOperationAction(ISD::USUBO, MVT::i32, Custom);

  // Expand sign extension of vectors
  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Expand);

  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Expand);

  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i32, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

  // We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32
  // to be Legal/Custom in order to avoid library calls.
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
  }

  setSchedulingPreference(Sched::Source);

  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::FP_TO_SINT);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
  setTargetDAGCombine(ISD::LOAD);
}

const R600Subtarget *R600TargetLowering::getSubtarget() const {
  return static_cast<const R600Subtarget *>(Subtarget);
}

static inline bool isEOP(MachineBasicBlock::iterator I) {
  if (std::next(I) == I->getParent()->end())
    return false;
  return std::next(I)->getOpcode() == AMDGPU::RETURN;
}

MachineBasicBlock *
R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock::iterator I = MI;
  const R600InstrInfo *TII = getSubtarget()->getInstrInfo();

  switch (MI.getOpcode()) {
  default:
    // Replace LDS_*_RET instructions that don't have any uses with the
    // equivalent LDS_*_NORET instruction.
    if (TII->isLDSRetInstr(MI.getOpcode())) {
      int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
      assert(DstIdx != -1);
      MachineInstrBuilder NewMI;
      // FIXME: getLDSNoRetOp method only handles LDS_1A1D LDS ops. Add
      // LDS_1A2D support and remove this special case.
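      // Keep the _RET form if the result is actually used; also keep
      // LDS_CMPST_RET, which has no NORET mapping yet (see the FIXME above).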
      if (!MRI.use_empty(MI.getOperand(DstIdx).getReg()) ||
          MI.getOpcode() == AMDGPU::LDS_CMPST_RET)
        return BB;

      NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
                      TII->get(AMDGPU::getLDSNoRetOp(MI.getOpcode())));
      for (unsigned i = 1, e = MI.getNumOperands(); i < e; ++i) {
        NewMI.addOperand(MI.getOperand(i));
      }
    } else {
      return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
    }
    break;
  case AMDGPU::CLAMP_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
        MI.getOperand(1).getReg());
    TII->addFlag(*NewMI, 0, MO_FLAG_CLAMP);
    break;
  }

  case AMDGPU::FABS_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
        MI.getOperand(1).getReg());
    TII->addFlag(*NewMI, 0, MO_FLAG_ABS);
    break;
  }

  case AMDGPU::FNEG_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
        MI.getOperand(1).getReg());
    TII->addFlag(*NewMI, 0, MO_FLAG_NEG);
    break;
  }

  case AMDGPU::MASK_WRITE: {
    unsigned maskedRegister = MI.getOperand(0).getReg();
    assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
    MachineInstr *defInstr = MRI.getVRegDef(maskedRegister);
    TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
    break;
  }

  case AMDGPU::MOV_IMM_F32:
    TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(), MI.getOperand(1)
                                                            .getFPImm()
                                                            ->getValueAPF()
                                                            .bitcastToAPInt()
                                                            .getZExtValue());
    break;

  case AMDGPU::MOV_IMM_I32:
    TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(),
                     MI.getOperand(1).getImm());
    break;

  case AMDGPU::MOV_IMM_GLOBAL_ADDR: {
    //TODO: Perhaps combine this instruction with the next if possible
    auto MIB = TII->buildDefaultInstruction(
        *BB, MI, AMDGPU::MOV, MI.getOperand(0).getReg(), AMDGPU::ALU_LITERAL_X);
    int Idx = TII->getOperandIdx(*MIB, AMDGPU::OpName::literal);
    //TODO: Ugh this is rather ugly
    MIB->getOperand(Idx) = MI.getOperand(1);
    break;
  }

  case AMDGPU::CONST_COPY: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, MI, AMDGPU::MOV, MI.getOperand(0).getReg(), AMDGPU::ALU_CONST);
    TII->setImmOperand(*NewMI, AMDGPU::OpName::src0_sel,
                       MI.getOperand(1).getImm());
    break;
  }

  case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
  case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
  case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .addOperand(MI.getOperand(0))
        .addOperand(MI.getOperand(1))
        .addImm(isEOP(I)); // Set End of program bit
    break;

  case AMDGPU::RAT_STORE_TYPED_eg:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .addOperand(MI.getOperand(0))
        .addOperand(MI.getOperand(1))
        .addOperand(MI.getOperand(2))
        .addImm(isEOP(I)); // Set End of program bit
    break;

  case AMDGPU::BRANCH:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
        .addOperand(MI.getOperand(0));
    break;

  case AMDGPU::BRANCH_COND_f32: {
    MachineInstr *NewMI =
        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
                AMDGPU::PREDICATE_BIT)
            .addOperand(MI.getOperand(1))
            .addImm(AMDGPU::PRED_SETNE)
            .addImm(0); // Flags
    TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
        .addOperand(MI.getOperand(0))
        .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case AMDGPU::BRANCH_COND_i32: {
    MachineInstr *NewMI =
        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
                AMDGPU::PREDICATE_BIT)
            .addOperand(MI.getOperand(1))
            .addImm(AMDGPU::PRED_SETNE_INT)
            .addImm(0); // Flags
    TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
        .addOperand(MI.getOperand(0))
        .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case AMDGPU::EG_ExportSwz:
  case AMDGPU::R600_ExportSwz: {
    // Instruction is left unmodified if it's not the last one of its type.
    bool isLastInstructionOfItsType = true;
    unsigned InstExportType = MI.getOperand(1).getImm();
    for (MachineBasicBlock::iterator NextExportInst = std::next(I),
         EndBlock = BB->end(); NextExportInst != EndBlock;
         NextExportInst = std::next(NextExportInst)) {
      if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz ||
          NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) {
        unsigned CurrentInstExportType = NextExportInst->getOperand(1)
                                             .getImm();
        if (CurrentInstExportType == InstExportType) {
          isLastInstructionOfItsType = false;
          break;
        }
      }
    }
    bool EOP = isEOP(I);
    if (!EOP && !isLastInstructionOfItsType)
      return BB;
    unsigned CfInst = (MI.getOpcode() == AMDGPU::EG_ExportSwz) ? 84 : 40;
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .addOperand(MI.getOperand(0))
        .addOperand(MI.getOperand(1))
        .addOperand(MI.getOperand(2))
        .addOperand(MI.getOperand(3))
        .addOperand(MI.getOperand(4))
        .addOperand(MI.getOperand(5))
        .addOperand(MI.getOperand(6))
        .addImm(CfInst)
        .addImm(EOP);
    break;
  }
  case AMDGPU::RETURN: {
    return BB;
  }
  }

  MI.eraseFromParent();
  return BB;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SHL_PARTS: return LowerSHLParts(Op, DAG);
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerSRXParts(Op, DAG);
  case ISD::UADDO: return LowerUADDSUBO(Op, DAG, ISD::ADD, AMDGPUISD::CARRY);
  case ISD::USUBO: return LowerUADDSUBO(Op, DAG, ISD::SUB, AMDGPUISD::BORROW);
  case ISD::FCOS:
  case ISD::FSIN: return LowerTrig(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
  case ISD::FrameIndex: return lowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_VOID: {
    SDValue Chain = Op.getOperand(0);
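    // For INTRINSIC_VOID nodes operand 0 is the chain and operand 1 carries
    // the intrinsic ID as an immediate.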
    unsigned IntrinsicID =
        cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    switch (IntrinsicID) {
    case AMDGPUIntrinsic::r600_store_swizzle: {
      SDLoc DL(Op);
      const SDValue Args[8] = {
        Chain,
        Op.getOperand(2), // Export Value
        Op.getOperand(3), // ArrayBase
        Op.getOperand(4), // Type
        DAG.getConstant(0, DL, MVT::i32), // SWZ_X
        DAG.getConstant(1, DL, MVT::i32), // SWZ_Y
        DAG.getConstant(2, DL, MVT::i32), // SWZ_Z
        DAG.getConstant(3, DL, MVT::i32)  // SWZ_W
      };
      return DAG.getNode(AMDGPUISD::R600_EXPORT, DL, Op.getValueType(), Args);
    }

    // default for switch(IntrinsicID)
    default: break;
    }
    // break out of case ISD::INTRINSIC_VOID in switch(Op.getOpcode())
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID =
        cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    EVT VT = Op.getValueType();
    SDLoc DL(Op);
    switch (IntrinsicID) {
    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
    case AMDGPUIntrinsic::r600_tex:
    case AMDGPUIntrinsic::r600_texc: {
      unsigned TextureOp;
      switch (IntrinsicID) {
      case AMDGPUIntrinsic::r600_tex:
        TextureOp = 0;
        break;
      case AMDGPUIntrinsic::r600_texc:
        TextureOp = 1;
        break;
      default:
        llvm_unreachable("unhandled texture operation");
      }

      SDValue TexArgs[19] = {
        DAG.getConstant(TextureOp, DL, MVT::i32),
        Op.getOperand(1),
        DAG.getConstant(0, DL, MVT::i32),
        DAG.getConstant(1, DL, MVT::i32),
        DAG.getConstant(2, DL, MVT::i32),
        DAG.getConstant(3, DL, MVT::i32),
        Op.getOperand(2),
        Op.getOperand(3),
        Op.getOperand(4),
        DAG.getConstant(0, DL, MVT::i32),
        DAG.getConstant(1, DL, MVT::i32),
        DAG.getConstant(2, DL, MVT::i32),
        DAG.getConstant(3, DL, MVT::i32),
        Op.getOperand(5),
        Op.getOperand(6),
        Op.getOperand(7),
        Op.getOperand(8),
        Op.getOperand(9),
        Op.getOperand(10)
      };
      return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs);
    }
    case AMDGPUIntrinsic::r600_dot4: {
      SDValue Args[8] = {
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
            DAG.getConstant(0, DL, MVT::i32)),
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
            DAG.getConstant(0, DL, MVT::i32)),
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
            DAG.getConstant(1, DL, MVT::i32)),
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
            DAG.getConstant(1, DL, MVT::i32)),
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
            DAG.getConstant(2, DL, MVT::i32)),
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
            DAG.getConstant(2, DL, MVT::i32)),
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
            DAG.getConstant(3, DL, MVT::i32)),
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
            DAG.getConstant(3, DL, MVT::i32))
      };
      return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
    }

    case Intrinsic::r600_implicitarg_ptr: {
      MVT PtrVT = getPointerTy(DAG.getDataLayout(), AMDGPUAS::PARAM_I_ADDRESS);
      uint32_t ByteOffset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
      return DAG.getConstant(ByteOffset, DL, PtrVT);
    }
    case Intrinsic::r600_read_ngroups_x:
      return LowerImplicitParameter(DAG, VT, DL, 0);
    case Intrinsic::r600_read_ngroups_y:
      return LowerImplicitParameter(DAG, VT, DL, 1);
    case Intrinsic::r600_read_ngroups_z:
      return LowerImplicitParameter(DAG, VT, DL, 2);
    case Intrinsic::r600_read_global_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 3);
    case Intrinsic::r600_read_global_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 4);
    case Intrinsic::r600_read_global_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 5);
    case Intrinsic::r600_read_local_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 6);
    case Intrinsic::r600_read_local_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 7);
    case Intrinsic::r600_read_local_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 8);

    case Intrinsic::r600_read_tgid_x:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_X, VT);
    case Intrinsic::r600_read_tgid_y:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_Y, VT);
    case Intrinsic::r600_read_tgid_z:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T1_Z, VT);
    case Intrinsic::r600_read_tidig_x:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_X, VT);
    case Intrinsic::r600_read_tidig_y:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_Y, VT);
    case Intrinsic::r600_read_tidig_z:
      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
                                  AMDGPU::T0_Z, VT);

    case Intrinsic::r600_recipsqrt_ieee:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case Intrinsic::r600_recipsqrt_clamped:
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
    }

    // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
    break;
  }
  } // end switch(Op.getOpcode())
  return SDValue();
}

void R600TargetLowering::ReplaceNodeResults(SDNode *N,
                                            SmallVectorImpl<SDValue> &Results,
                                            SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    AMDGPUTargetLowering::ReplaceNodeResults(N, Results, DAG);
    return;
  case ISD::FP_TO_UINT:
    if (N->getValueType(0) == MVT::i1) {
      Results.push_back(lowerFP_TO_UINT(N->getOperand(0), DAG));
      return;
    }
    // Since we don't care about out of bounds values we can use FP_TO_SINT
    // for uints too. The DAGLegalizer code for uint considers some extra
    // cases which are not necessary here.
    LLVM_FALLTHROUGH;
  case ISD::FP_TO_SINT: {
    if (N->getValueType(0) == MVT::i1) {
      Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));
      return;
    }

    SDValue Result;
    if (expandFP_TO_SINT(N, Result, DAG))
      Results.push_back(Result);
    return;
  }
  case ISD::SDIVREM: {
    SDValue Op = SDValue(N, 1);
    SDValue RES = LowerSDIVREM(Op, DAG);
    Results.push_back(RES);
    Results.push_back(RES.getValue(1));
    break;
  }
  case ISD::UDIVREM: {
    SDValue Op = SDValue(N, 0);
    LowerUDIVREM64(Op, DAG, Results);
    break;
  }
  }
}

SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
                                                   SDValue Vector) const {
  SDLoc DL(Vector);
  EVT VecVT = Vector.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  SmallVector<SDValue, 8> Args;

  for (unsigned i = 0, e = VecVT.getVectorNumElements(); i != e; ++i) {
    Args.push_back(DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vector,
        DAG.getConstant(i, DL, getVectorIdxTy(DAG.getDataLayout()))));
  }

  return DAG.getNode(AMDGPUISD::BUILD_VERTICAL_VECTOR, DL, VecVT, Args);
}

SDValue R600TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vector = Op.getOperand(0);
  SDValue Index = Op.getOperand(1);

  if (isa<ConstantSDNode>(Index) ||
      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
    return Op;

  Vector = vectorToVerticalVector(DAG, Vector);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
                     Vector, Index);
}

SDValue R600TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vector = Op.getOperand(0);
  SDValue Value = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);

  if (isa<ConstantSDNode>(Index) ||
      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
    return Op;

  Vector = vectorToVerticalVector(DAG, Vector);
  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, Op.getValueType(),
                               Vector, Value, Index);
  return vectorToVerticalVector(DAG, Insert);
}

SDValue R600TargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                               SDValue Op,
                                               SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
  if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  const DataLayout &DL = DAG.getDataLayout();
  const GlobalValue *GV = GSD->getGlobal();
  MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

  SDValue GA = DAG.getTargetGlobalAddress(GV, SDLoc(GSD), ConstPtrVT);
  return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, SDLoc(GSD), ConstPtrVT, GA);
}

SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  // On hw >= R700, COS/SIN input must be between -1. and 1.
  // Thus we lower them to TRIG ( FRACT ( x / 2Pi + 0.5) - 0.5)
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Should this propagate fast-math-flags?
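  // 0.15915494309 is 1/(2*pi): the argument is scaled into revolutions, so
  // FRACT(x/(2*pi) + 0.5) - 0.5 lands in [-0.5, 0.5) as required above.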
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
      DAG.getNode(ISD::FADD, DL, VT,
        DAG.getNode(ISD::FMUL, DL, VT, Arg,
          DAG.getConstantFP(0.15915494309, DL, MVT::f32)),
        DAG.getConstantFP(0.5, DL, MVT::f32)));
  unsigned TrigNode;
  switch (Op.getOpcode()) {
  case ISD::FCOS:
    TrigNode = AMDGPUISD::COS_HW;
    break;
  case ISD::FSIN:
    TrigNode = AMDGPUISD::SIN_HW;
    break;
  default:
    llvm_unreachable("Wrong trig opcode");
  }
  SDValue TrigVal = DAG.getNode(TrigNode, DL, VT,
      DAG.getNode(ISD::FADD, DL, VT, FractPart,
        DAG.getConstantFP(-0.5, DL, MVT::f32)));
  if (Gen >= R600Subtarget::R700)
    return TrigVal;
  // On R600 hw, COS/SIN input must be between -Pi and Pi.
  return DAG.getNode(ISD::FMUL, DL, VT, TrigVal,
                     DAG.getConstantFP(3.14159265359, DL, MVT::f32));
}

SDValue R600TargetLowering::LowerSHLParts(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shift = Op.getOperand(2);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);

  SDValue Width = DAG.getConstant(VT.getSizeInBits(), DL, VT);
  SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
  SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
  SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);

  // The dance around Width1 is necessary for the 0 special case.
  // Without it the CompShift might be 32, producing incorrect results in
  // Overflow. So we do the shift in two steps; the alternative is to
  // add a conditional to filter the special case.

  SDValue Overflow = DAG.getNode(ISD::SRL, DL, VT, Lo, CompShift);
  Overflow = DAG.getNode(ISD::SRL, DL, VT, Overflow, One);

  SDValue HiSmall = DAG.getNode(ISD::SHL, DL, VT, Hi, Shift);
  HiSmall = DAG.getNode(ISD::OR, DL, VT, HiSmall, Overflow);
  SDValue LoSmall = DAG.getNode(ISD::SHL, DL, VT, Lo, Shift);

  SDValue HiBig = DAG.getNode(ISD::SHL, DL, VT, Lo, BigShift);
  SDValue LoBig = Zero;

  Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
  Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);

  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
}

SDValue R600TargetLowering::LowerSRXParts(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shift = Op.getOperand(2);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);

  const bool SRA = Op.getOpcode() == ISD::SRA_PARTS;

  SDValue Width = DAG.getConstant(VT.getSizeInBits(), DL, VT);
  SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
  SDValue BigShift = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
  SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);

  // The dance around Width1 is necessary for the 0 special case.
  // Without it the CompShift might be 32, producing incorrect results in
  // Overflow. So we do the shift in two steps; the alternative is to
  // add a conditional to filter the special case.

  SDValue Overflow = DAG.getNode(ISD::SHL, DL, VT, Hi, CompShift);
  Overflow = DAG.getNode(ISD::SHL, DL, VT, Overflow, One);

  SDValue HiSmall = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, Shift);
  SDValue LoSmall = DAG.getNode(ISD::SRL, DL, VT, Lo, Shift);
  LoSmall = DAG.getNode(ISD::OR, DL, VT, LoSmall, Overflow);

  SDValue LoBig = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, BigShift);
  SDValue HiBig = SRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, Width1) : Zero;

  Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
  Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);

  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
}

SDValue R600TargetLowering::LowerUADDSUBO(SDValue Op, SelectionDAG &DAG,
                                          unsigned mainop, unsigned ovf) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);

  SDValue OVF = DAG.getNode(ovf, DL, VT, Lo, Hi);
  // Extend sign.
  OVF = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, OVF,
                    DAG.getValueType(MVT::i1));

  SDValue Res = DAG.getNode(mainop, DL, VT, Lo, Hi);

  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT, VT), Res, OVF);
}

SDValue R600TargetLowering::lowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(
      ISD::SETCC,
      DL,
      MVT::i1,
      Op, DAG.getConstantFP(1.0f, DL, MVT::f32),
      DAG.getCondCode(ISD::SETEQ));
}

SDValue R600TargetLowering::lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(
      ISD::SETCC,
      DL,
      MVT::i1,
      Op, DAG.getConstantFP(-1.0f, DL, MVT::f32),
      DAG.getCondCode(ISD::SETEQ));
}

SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                                                   const SDLoc &DL,
                                                   unsigned DwordOffset) const {
  unsigned ByteOffset = DwordOffset * 4;
  PointerType *PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                          AMDGPUAS::CONSTANT_BUFFER_0);

  // We shouldn't be using an offset wider than 16-bits for implicit parameters.
  assert(isInt<16>(ByteOffset));

  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                     DAG.getConstant(ByteOffset, DL, MVT::i32), // PTR
                     MachinePointerInfo(ConstantPointerNull::get(PtrType)));
}

bool R600TargetLowering::isZero(SDValue Op) const {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
    return Cst->isNullValue();
  } else if (ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CstFP->isZero();
  } else {
    return false;
  }
}

bool R600TargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  return isAllOnesConstant(Op);
}

bool R600TargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  return isNullConstant(Op);
}

SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  SDValue Temp;

  if (VT == MVT::f32) {
    DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
    SDValue MinMax = CombineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
    if (MinMax)
      return MinMax;
  }

  // LHS and RHS are guaranteed to be the same value type
  EVT CompareVT = LHS.getValueType();

  // Check if we can lower this to a native operation.

  // Try to lower to a SET* instruction:
  //
  // SET* can match the following patterns:
  //
  // select_cc f32, f32, -1, 0, cc_supported
  // select_cc f32, f32, 1.0f, 0.0f, cc_supported
  // select_cc i32, i32, -1, 0, cc_supported
  //

  // Move hardware True/False values to the correct operand.
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  ISD::CondCode InverseCC =
      ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
  if (isHWTrueValue(False) && isHWFalseValue(True)) {
    if (isCondCodeLegal(InverseCC, CompareVT.getSimpleVT())) {
      std::swap(False, True);
      CC = DAG.getCondCode(InverseCC);
    } else {
      ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InverseCC);
      if (isCondCodeLegal(SwapInvCC, CompareVT.getSimpleVT())) {
        std::swap(False, True);
        std::swap(LHS, RHS);
        CC = DAG.getCondCode(SwapInvCC);
      }
    }
  }

  if (isHWTrueValue(True) && isHWFalseValue(False) &&
      (CompareVT == VT || VT == MVT::i32)) {
    // This can be matched by a SET* instruction.
    return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
  }

  // Try to lower to a CND* instruction:
  //
  // CND* can match the following patterns:
  //
  // select_cc f32, 0.0, f32, f32, cc_supported
  // select_cc f32, 0.0, i32, i32, cc_supported
  // select_cc i32, 0,   f32, f32, cc_supported
  // select_cc i32, 0,   i32, i32, cc_supported
  //

  // Try to move the zero value to the RHS
  if (isZero(LHS)) {
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    // Try swapping the operands
    ISD::CondCode CCSwapped = ISD::getSetCCSwappedOperands(CCOpcode);
    if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
      std::swap(LHS, RHS);
      CC = DAG.getCondCode(CCSwapped);
    } else {
      // Try inverting the condition and then swapping the operands
      ISD::CondCode CCInv = ISD::getSetCCInverse(CCOpcode, CompareVT.isInteger());
      CCSwapped = ISD::getSetCCSwappedOperands(CCInv);
      if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
        std::swap(True, False);
        std::swap(LHS, RHS);
        CC = DAG.getCondCode(CCSwapped);
      }
    }
  }
  if (isZero(RHS)) {
    SDValue Cond = LHS;
    SDValue Zero = RHS;
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    if (CompareVT != VT) {
      // Bitcast True / False to the correct types. This will end up being
      // a nop, but it allows us to define only a single pattern in the
      // .TD files for each CND* instruction rather than having to have
      // one pattern for integer True/False and one for fp True/False
      True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
      False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
    }

    switch (CCOpcode) {
    case ISD::SETONE:
    case ISD::SETUNE:
    case ISD::SETNE:
      CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
      Temp = True;
      True = False;
      False = Temp;
      break;
    default:
      break;
    }
    SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, CompareVT,
                                     Cond, Zero,
                                     True, False,
                                     DAG.getCondCode(CCOpcode));
    return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
  }

  // If we make it this far it means we have no native instructions to handle
  // this SELECT_CC, so we must lower it.
  SDValue HWTrue, HWFalse;

  if (CompareVT == MVT::f32) {
    HWTrue = DAG.getConstantFP(1.0f, DL, CompareVT);
    HWFalse = DAG.getConstantFP(0.0f, DL, CompareVT);
  } else if (CompareVT == MVT::i32) {
    HWTrue = DAG.getConstant(-1, DL, CompareVT);
    HWFalse = DAG.getConstant(0, DL, CompareVT);
  } else {
    llvm_unreachable("Unhandled value type in LowerSELECT_CC");
  }

  // Lower this unsupported SELECT_CC into a combination of two supported
  // SELECT_CC operations.
  SDValue Cond = DAG.getNode(ISD::SELECT_CC, DL, CompareVT, LHS, RHS,
                             HWTrue, HWFalse, CC);

  return DAG.getNode(ISD::SELECT_CC, DL, VT,
                     Cond, HWFalse,
                     True, False,
                     DAG.getCondCode(ISD::SETNE));
}

/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
/// convert these pointers to a register index. Each register holds
/// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
/// \p StackWidth, which tells us how many of the 4 sub-registers will be used
/// for indirect addressing.
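/// For example, with a StackWidth of 1 each row uses a single 32-bit
/// sub-register (4 bytes), so a byte address becomes a register index via a
/// right shift by 2; widths 2 and 4 shift by 3 and 4 respectively.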
SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
                                               unsigned StackWidth,
                                               SelectionDAG &DAG) const {
  unsigned SRLPad;
  switch (StackWidth) {
  case 1:
    SRLPad = 2;
    break;
  case 2:
    SRLPad = 3;
    break;
  case 4:
    SRLPad = 4;
    break;
  default: llvm_unreachable("Invalid stack width");
  }

  SDLoc DL(Ptr);
  return DAG.getNode(ISD::SRL, DL, Ptr.getValueType(), Ptr,
                     DAG.getConstant(SRLPad, DL, MVT::i32));
}

void R600TargetLowering::getStackAddress(unsigned StackWidth,
                                         unsigned ElemIdx,
                                         unsigned &Channel,
                                         unsigned &PtrIncr) const {
  switch (StackWidth) {
  default:
  case 1:
    Channel = 0;
    if (ElemIdx > 0) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 2:
    Channel = ElemIdx % 2;
    if (ElemIdx == 2) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 4:
    Channel = ElemIdx;
    PtrIncr = 0;
    break;
  }
}

SDValue R600TargetLowering::lowerPrivateTruncStore(StoreSDNode *Store,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Store);
  //TODO: Who creates the i8 stores?
  assert(Store->isTruncatingStore()
         || Store->getValue().getValueType() == MVT::i8);
  assert(Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS);

  SDValue Mask;
  if (Store->getMemoryVT() == MVT::i8) {
    assert(Store->getAlignment() >= 1);
    Mask = DAG.getConstant(0xff, DL, MVT::i32);
  } else if (Store->getMemoryVT() == MVT::i16) {
    assert(Store->getAlignment() >= 2);
    Mask = DAG.getConstant(0xffff, DL, MVT::i32);
  } else {
    llvm_unreachable("Unsupported private trunc store");
  }

  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDValue Offset = Store->getOffset();
  EVT MemVT = Store->getMemoryVT();

  SDValue LoadPtr = BasePtr;
  if (!Offset.isUndef()) {
    LoadPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, Offset);
  }

  // Get dword location
  // TODO: this should be eliminated by the future SHR ptr, 2
  SDValue Ptr = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                            DAG.getConstant(0xfffffffc, DL, MVT::i32));

  // Load dword
  // TODO: can we be smarter about machine pointer info?
  SDValue Dst = DAG.getLoad(MVT::i32, DL, Chain, Ptr, MachinePointerInfo());

  Chain = Dst.getValue(1);

  // Get offset in dword
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                                DAG.getConstant(0x3, DL, MVT::i32));

  // Convert byte offset to bit shift
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // TODO: Contrary to the name of the function,
  // it also handles sub i32 non-truncating stores (like i1)
  SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                  Store->getValue());

  // Mask the value to the right type
  SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

  // Shift the value in place
  SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                     MaskedValue, ShiftAmt);

  // Shift the mask in place
  SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, Mask, ShiftAmt);

  // Invert the mask. NOTE: if we had native ROL instructions we could
  // use an inverted mask.
  DstMask = DAG.getNOT(DL, DstMask, MVT::i32);

  // Cleanup the target bits
  Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

  // Add the new bits
  SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);

  // Store dword
  // TODO: Can we be smarter about MachinePointerInfo?
  return DAG.getStore(Chain, DL, Value, Ptr, MachinePointerInfo());
}

SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  unsigned AS = StoreNode->getAddressSpace();

  SDValue Chain = StoreNode->getChain();
  SDValue Ptr = StoreNode->getBasePtr();
  SDValue Value = StoreNode->getValue();

  EVT VT = Value.getValueType();
  EVT MemVT = StoreNode->getMemoryVT();
  EVT PtrVT = Ptr.getValueType();

  SDLoc DL(Op);

  // Neither LOCAL nor PRIVATE can do vectors at the moment
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS) &&
      VT.isVector()) {
    return scalarizeVectorStore(StoreNode, DAG);
  }

  unsigned Align = StoreNode->getAlignment();
  if (Align < MemVT.getStoreSize() &&
      !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) {
    return expandUnalignedStore(StoreNode, DAG);
  }

  SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, PtrVT, Ptr,
                                  DAG.getConstant(2, DL, PtrVT));

  if (AS == AMDGPUAS::GLOBAL_ADDRESS) {
    // It is beneficial to create MSKOR here instead of combiner to avoid
    // artificial dependencies introduced by RMW
    if (StoreNode->isTruncatingStore()) {
      assert(VT.bitsLE(MVT::i32));
      SDValue MaskConstant;
      if (MemVT == MVT::i8) {
        MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
      } else {
        assert(MemVT == MVT::i16);
        assert(StoreNode->getAlignment() >= 2);
        MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
      }

      SDValue ByteIndex = DAG.getNode(ISD::AND, DL, PtrVT, Ptr,
                                      DAG.getConstant(0x00000003, DL, PtrVT));
      SDValue BitShift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
                                     DAG.getConstant(3, DL, VT));

      // Put the mask in the correct place.
      SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, BitShift);

      // Truncate the value and shift it into place.
      SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
      SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, BitShift);

      // XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
      // vector instead.
      SDValue Src[4] = {
        ShiftedValue,
        DAG.getConstant(0, DL, MVT::i32),
        DAG.getConstant(0, DL, MVT::i32),
        Mask
      };
      SDValue Input = DAG.getBuildVector(MVT::v4i32, DL, Src);
      SDValue Args[3] = { Chain, Input, DWordAddr };
      return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
                                     Op->getVTList(), Args, MemVT,
                                     StoreNode->getMemOperand());
    } else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR && VT.bitsGE(MVT::i32)) {
      // Convert pointer from byte address to dword address.
      Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, PtrVT, DWordAddr);

      if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
        llvm_unreachable("Truncated and indexed stores not supported yet");
      } else {
        Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
      }
      return Chain;
    }
  }

  // GLOBAL_ADDRESS has been handled above, LOCAL_ADDRESS allows all sizes
  if (AS != AMDGPUAS::PRIVATE_ADDRESS)
    return SDValue();

  if (MemVT.bitsLT(MVT::i32))
    return lowerPrivateTruncStore(StoreNode, DAG);

  // Standard i32+ store, tag it with DWORDADDR to note that the address
  // has been shifted
  if (Ptr.getOpcode() != AMDGPUISD::DWORDADDR) {
    Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, PtrVT, DWordAddr);
    return DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
  }

  // Tagged i32+ stores will be matched by patterns
  return SDValue();
}

// Returns 512 + (kc_bank << 12).
static int
ConstantAddressBlock(unsigned AddressSpace) {
  switch (AddressSpace) {
  case AMDGPUAS::CONSTANT_BUFFER_0:
    return 512;
  case AMDGPUAS::CONSTANT_BUFFER_1:
    return 512 + 4096;
  case AMDGPUAS::CONSTANT_BUFFER_2:
    return 512 + 4096 * 2;
  case AMDGPUAS::CONSTANT_BUFFER_3:
    return 512 + 4096 * 3;
  case AMDGPUAS::CONSTANT_BUFFER_4:
    return 512 + 4096 * 4;
  case AMDGPUAS::CONSTANT_BUFFER_5:
    return 512 + 4096 * 5;
  case AMDGPUAS::CONSTANT_BUFFER_6:
    return 512 + 4096 * 6;
  case AMDGPUAS::CONSTANT_BUFFER_7:
    return 512 + 4096 * 7;
  case AMDGPUAS::CONSTANT_BUFFER_8:
    return 512 + 4096 * 8;
  case AMDGPUAS::CONSTANT_BUFFER_9:
    return 512 + 4096 * 9;
  case AMDGPUAS::CONSTANT_BUFFER_10:
    return 512 + 4096 * 10;
  case AMDGPUAS::CONSTANT_BUFFER_11:
    return 512 + 4096 * 11;
  case AMDGPUAS::CONSTANT_BUFFER_12:
    return 512 + 4096 * 12;
  case AMDGPUAS::CONSTANT_BUFFER_13:
    return 512 + 4096 * 13;
  case AMDGPUAS::CONSTANT_BUFFER_14:
    return 512 + 4096 * 14;
  case AMDGPUAS::CONSTANT_BUFFER_15:
    return 512 + 4096 * 15;
  default:
    return -1;
  }
}

SDValue R600TargetLowering::lowerPrivateExtLoad(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();
  assert(Load->getAlignment() >= MemVT.getStoreSize());

  SDValue BasePtr = Load->getBasePtr();
  SDValue Chain = Load->getChain();
  SDValue Offset = Load->getOffset();

  SDValue LoadPtr = BasePtr;
  if (!Offset.isUndef()) {
    LoadPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, Offset);
  }

  // Get dword location
  // NOTE: this should be eliminated by the future SHR ptr, 2
  SDValue Ptr = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                            DAG.getConstant(0xfffffffc, DL, MVT::i32));

  // Load dword
  // TODO: can we be smarter about machine pointer info?
  SDValue Read = DAG.getLoad(MVT::i32, DL, Chain, Ptr, MachinePointerInfo());

  // Get offset within the register.
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                LoadPtr, DAG.getConstant(0x3, DL, MVT::i32));

  // Bit offset of target byte (byteIdx * 8).
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // Shift to the right.
  SDValue Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Read, ShiftAmt);

  // Eliminate the upper bits by setting them to ...
  EVT MemEltVT = MemVT.getScalarType();

  if (ExtType == ISD::SEXTLOAD) { // ... ones.
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    Ret = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  } else { // ... or zeros.
    Ret = DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
  }

  SDValue Ops[] = {
    Ret,
    Read.getValue(1) // This should be our output chain
  };

  return DAG.getMergeValues(Ops, DL);
}

SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
  unsigned AS = LoadNode->getAddressSpace();
  EVT MemVT = LoadNode->getMemoryVT();
  ISD::LoadExtType ExtType = LoadNode->getExtensionType();

  if (AS == AMDGPUAS::PRIVATE_ADDRESS &&
      ExtType != ISD::NON_EXTLOAD && MemVT.bitsLT(MVT::i32)) {
    return lowerPrivateExtLoad(Op, DAG);
  }

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Chain = LoadNode->getChain();
  SDValue Ptr = LoadNode->getBasePtr();

  if ((LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       LoadNode->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      VT.isVector()) {
    return scalarizeVectorLoad(LoadNode, DAG);
  }

  int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
  if (ConstantBlock > -1 &&
      ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
       (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
    SDValue Result;
    if (isa<ConstantExpr>(LoadNode->getMemOperand()->getValue()) ||
        isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
        isa<ConstantSDNode>(Ptr)) {
      SDValue Slots[4];
      for (unsigned i = 0; i < 4; i++) {
        // We want Const position encoded with the following formula :
        // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
        // const_index is Ptr computed by llvm using an alignment of 16.
        // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
        // then div by 4 at the ISel step
        SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
            DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
        Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
      }
      EVT NewVT = MVT::v4i32;
      unsigned NumElements = 4;
      if (VT.isVector()) {
        NewVT = VT;
        NumElements = VT.getVectorNumElements();
      }
      Result = DAG.getBuildVector(NewVT, DL, makeArrayRef(Slots, NumElements));
    } else {
      // A non-constant ptr can't be folded; keep it as a v4f32 load.
      Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
          DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(4, DL, MVT::i32)),
          DAG.getConstant(LoadNode->getAddressSpace() -
                          AMDGPUAS::CONSTANT_BUFFER_0, DL, MVT::i32)
          );
    }

    if (!VT.isVector()) {
      Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
                           DAG.getConstant(0, DL, MVT::i32));
    }

    SDValue MergedValues[2] = {
      Result,
      Chain
    };
    return DAG.getMergeValues(MergedValues, DL);
  }

  // For most operations returning SDValue() will result in the node being
  // expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so we
  // need to manually expand loads that may be legal in some address spaces and
  // illegal in others. SEXT loads from CONSTANT_BUFFER_0 are supported for
  // compute shaders, since the data is sign extended when it is uploaded to
  // the buffer. However SEXT loads from other address spaces are not
  // supported, so we need to expand them here.
  if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
    EVT MemVT = LoadNode->getMemoryVT();
    assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
    SDValue NewLoad = DAG.getExtLoad(
        ISD::EXTLOAD, DL, VT, Chain, Ptr, LoadNode->getPointerInfo(), MemVT,
        LoadNode->getAlignment(), LoadNode->getMemOperand()->getFlags());
    SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, NewLoad,
                              DAG.getValueType(MemVT));

    SDValue MergedValues[2] = { Res, Chain };
    return DAG.getMergeValues(MergedValues, DL);
  }

  if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
    return SDValue();
  }

  // DWORDADDR ISD marks already shifted address
  if (Ptr.getOpcode() != AMDGPUISD::DWORDADDR) {
    assert(VT == MVT::i32);
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, DL, MVT::i32));
    Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, MVT::i32, Ptr);
    return DAG.getLoad(MVT::i32, DL, Chain, Ptr, LoadNode->getMemOperand());
  }
  return SDValue();
}

SDValue R600TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Jump = Op.getOperand(2);

  return DAG.getNode(AMDGPUISD::BRANCH_COND, SDLoc(Op), Op.getValueType(),
                     Chain, Jump, Cond);
}

SDValue R600TargetLowering::lowerFrameIndex(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const R600FrameLowering *TFL = getSubtarget()->getFrameLowering();

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned IgnoredFrameReg;
  unsigned Offset =
      TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
                         Op.getValueType());
}

/// XXX Only kernel functions are supported, so we can assume for now that
/// every function is a kernel function, but in the future we should use
/// separate calling conventions for kernel and non-kernel functions.
SDValue R600TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  MachineFunction &MF = DAG.getMachineFunction();
  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

  SmallVector<ISD::InputArg, 8> LocalIns;

  if (AMDGPU::isShader(CallConv)) {
    AnalyzeFormalArguments(CCInfo, Ins);
  } else {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  }

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const ISD::InputArg &In = Ins[i];
    EVT VT = In.VT;
    EVT MemVT = VA.getLocVT();
    if (!VT.isVector() && MemVT.isVector()) {
      // Get load source type if scalarized.
      MemVT = MemVT.getVectorElementType();
    }

    if (AMDGPU::isShader(CallConv)) {
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), &AMDGPU::R600_Reg128RegClass);
      SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Register);
      continue;
    }

    PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                          AMDGPUAS::CONSTANT_BUFFER_0);

    // i64 isn't a legal type, so the register type used ends up as i32, which
    // isn't expected here. It attempts to create this sextload, but it ends up
    // being invalid. Somehow this seems to work with i64 arguments, but breaks
    // for <1 x i64>.

    // The first 36 bytes of the input buffer contain information about
    // thread group and global sizes.
    ISD::LoadExtType Ext = ISD::NON_EXTLOAD;
    if (MemVT.getScalarSizeInBits() != VT.getScalarSizeInBits()) {
      // FIXME: This should really check the extload type, but the handling of
      // extload vector parameters seems to be broken.

      // Ext = In.Flags.isSExt() ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
      Ext = ISD::SEXTLOAD;
    }

    // Compute the offset from the value.
    // XXX - I think PartOffset should give you this, but it seems to give the
    // size of the register which isn't useful.

    unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
    unsigned PartOffset = VA.getLocMemOffset();
    unsigned Offset = Subtarget->getExplicitKernelArgOffset() +
                      VA.getLocMemOffset();

    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy), PartOffset - ValBase);
    SDValue Arg = DAG.getLoad(
        ISD::UNINDEXED, Ext, VT, DL, Chain,
        DAG.getConstant(Offset, DL, MVT::i32), DAG.getUNDEF(MVT::i32), PtrInfo,
        MemVT, /* Alignment = */ 4, MachineMemOperand::MONonTemporal |
                                    MachineMemOperand::MODereferenceable |
                                    MachineMemOperand::MOInvariant);

    // 4 is the preferred alignment for the CONSTANT memory space.
    InVals.push_back(Arg);
    MFI->setABIArgOffset(Offset + MemVT.getStoreSize());
  }
  return Chain;
}

EVT R600TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool R600TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                        unsigned AddrSpace,
                                                        unsigned Align,
                                                        bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  if (!VT.isSimple() || VT == MVT::Other)
    return false;

  if (VT.bitsLT(MVT::i32))
    return false;

  // TODO: This is a rough estimate.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

static SDValue CompactSwizzlableVector(
    SelectionDAG &DAG, SDValue VectorEntry,
    DenseMap<unsigned, unsigned> &RemapSwizzle) {
  assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
  assert(RemapSwizzle.empty());
  SDValue NewBldVec[4] = {
    VectorEntry.getOperand(0),
    VectorEntry.getOperand(1),
    VectorEntry.getOperand(2),
    VectorEntry.getOperand(3)
  };

  for (unsigned i = 0; i < 4; i++) {
    if (NewBldVec[i].isUndef())
      // We mask the write here to teach later passes that the ith element of
      // this vector is undef. Thus we can use it to reduce 128-bit register
      // usage, break false dependencies and additionally make the assembly
      // easier to read.
      RemapSwizzle[i] = 7; // SEL_MASK_WRITE
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) {
      if (C->isZero()) {
        RemapSwizzle[i] = 4; // SEL_0
        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
      } else if (C->isExactlyValue(1.0)) {
        RemapSwizzle[i] = 5; // SEL_1
        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
      }
    }

    if (NewBldVec[i].isUndef())
      continue;
    // Reuse an earlier identical element instead of repeating it.
    for (unsigned j = 0; j < i; j++) {
      if (NewBldVec[i] == NewBldVec[j]) {
        NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType());
        RemapSwizzle[i] = j;
        break;
      }
    }
  }

  return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
                            NewBldVec);
}

static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
                                DenseMap<unsigned, unsigned> &RemapSwizzle) {
  assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
  assert(RemapSwizzle.empty());
  SDValue NewBldVec[4] = {
    VectorEntry.getOperand(0),
    VectorEntry.getOperand(1),
    VectorEntry.getOperand(2),
    VectorEntry.getOperand(3)
  };
  bool isUnmovable[4] = { false, false, false, false };
  for (unsigned i = 0; i < 4; i++) {
    RemapSwizzle[i] = i;
    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
      unsigned Idx = cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
                         ->getZExtValue();
      if (i == Idx)
        isUnmovable[Idx] = true;
    }
  }

  for (unsigned i = 0; i < 4; i++) {
    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
      unsigned Idx = cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
                         ->getZExtValue();
      if (isUnmovable[Idx])
        continue;
      // Swap i and Idx.
      std::swap(NewBldVec[Idx], NewBldVec[i]);
      std::swap(RemapSwizzle[i], RemapSwizzle[Idx]);
      break;
    }
  }

  return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
                            NewBldVec);
}
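
// Illustrative sketch (not from the original source): given
// build_vector(a, 0.0f, 1.0f, a), CompactSwizzlableVector remaps lane 1 to
// SEL_0, lane 2 to SEL_1 and lane 3 to a reuse of lane 0, leaving a single
// live element; ReorganizeVector then tries to move one element produced by
// EXTRACT_VECTOR_ELT back into the channel it was extracted from, keeping
// elements that already match their channel fixed. OptimizeSwizzle below
// applies both passes and rewrites the swizzle operands accordingly.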

SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
                                            SDValue Swz[4], SelectionDAG &DAG,
                                            const SDLoc &DL) const {
  assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
  // Old -> New swizzle values.
  DenseMap<unsigned, unsigned> SwizzleRemap;

  BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
  for (unsigned i = 0; i < 4; i++) {
    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
    if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
  }

  SwizzleRemap.clear();
  BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
  for (unsigned i = 0; i < 4; i++) {
    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
    if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
  }

  return BuildVector;
}

//===----------------------------------------------------------------------===//
// Custom DAG Optimizations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  // (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
  case ISD::FP_ROUND: {
    SDValue Arg = N->getOperand(0);
    if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
      return DAG.getNode(ISD::UINT_TO_FP, DL, N->getValueType(0),
                         Arg.getOperand(0));
    }
    break;
  }

  // (i32 fp_to_sint (fneg (select_cc f32, f32, 1.0, 0.0 cc))) ->
  // (i32 select_cc f32, f32, -1, 0 cc)
  //
  // Mesa's GLSL frontend generates the above pattern a lot and we can lower
  // this to one of the SET*_DX10 instructions.
  case ISD::FP_TO_SINT: {
    SDValue FNeg = N->getOperand(0);
    if (FNeg.getOpcode() != ISD::FNEG) {
      return SDValue();
    }
    SDValue SelectCC = FNeg.getOperand(0);
    if (SelectCC.getOpcode() != ISD::SELECT_CC ||
        SelectCC.getOperand(0).getValueType() != MVT::f32 || // LHS
        SelectCC.getOperand(2).getValueType() != MVT::f32 || // True
        !isHWTrueValue(SelectCC.getOperand(2)) ||
        !isHWFalseValue(SelectCC.getOperand(3))) {
      return SDValue();
    }

    return DAG.getNode(ISD::SELECT_CC, DL, N->getValueType(0),
                       SelectCC.getOperand(0), // LHS
                       SelectCC.getOperand(1), // RHS
                       DAG.getConstant(-1, DL, MVT::i32), // True
                       DAG.getConstant(0, DL, MVT::i32),  // False
                       SelectCC.getOperand(4)); // CC
  }
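
  // Note: this relies on fp_to_sint(fneg(1.0f)) == -1 and
  // fp_to_sint(fneg(0.0f)) == 0, which are exactly the values the DX10-style
  // SET* instructions produce, so no extra conversion is needed.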

  // insert_vector_elt (build_vector elt0, ..., eltN), NewEltIdx, idx
  // => build_vector elt0, ..., NewEltIdx, ..., eltN
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = N->getOperand(0);
    SDValue InVal = N->getOperand(1);
    SDValue EltNo = N->getOperand(2);

    // If the inserted element is an UNDEF, just use the input vector.
    if (InVal.isUndef())
      return InVec;

    EVT VT = InVec.getValueType();

    // If we can't generate a legal BUILD_VECTOR, exit.
    if (!isOperationLegal(ISD::BUILD_VECTOR, VT))
      return SDValue();

    // Check that we know which element is being inserted.
    if (!isa<ConstantSDNode>(EltNo))
      return SDValue();
    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();

    // Check that the operand is a BUILD_VECTOR (or UNDEF, which can
    // essentially be converted to a BUILD_VECTOR). Fill in the Ops vector
    // with the vector elements.
    SmallVector<SDValue, 8> Ops;
    if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
      Ops.append(InVec.getNode()->op_begin(),
                 InVec.getNode()->op_end());
    } else if (InVec.isUndef()) {
      unsigned NElts = VT.getVectorNumElements();
      Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
    } else {
      return SDValue();
    }

    // Insert the element.
    if (Elt < Ops.size()) {
      // All the operands of BUILD_VECTOR must have the same type;
      // we enforce that here.
      EVT OpVT = Ops[0].getValueType();
      if (InVal.getValueType() != OpVT)
        InVal = OpVT.bitsGT(InVal.getValueType()) ?
            DAG.getNode(ISD::ANY_EXTEND, DL, OpVT, InVal) :
            DAG.getNode(ISD::TRUNCATE, DL, OpVT, InVal);
      Ops[Elt] = InVal;
    }

    // Return the new vector.
    return DAG.getBuildVector(VT, DL, Ops);
  }

  // An extract_vector_elt of a build_vector generated by custom lowering
  // also needs to be custom combined here.
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue Arg = N->getOperand(0);
    if (Arg.getOpcode() == ISD::BUILD_VECTOR) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return Arg->getOperand(Element);
      }
    }
    if (Arg.getOpcode() == ISD::BITCAST &&
        Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
        (Arg.getOperand(0).getValueType().getVectorNumElements() ==
         Arg.getValueType().getVectorNumElements())) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return DAG.getNode(ISD::BITCAST, DL, N->getVTList(),
                           Arg->getOperand(0).getOperand(Element));
      }
    }
    break;
  }

  case ISD::SELECT_CC: {
    // Try common optimizations first.
    if (SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI))
      return Ret;

    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
    // selectcc x, y, a, b, inv(cc)
    //
    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, setne ->
    // selectcc x, y, a, b, cc
    SDValue LHS = N->getOperand(0);
    if (LHS.getOpcode() != ISD::SELECT_CC) {
      return SDValue();
    }

    SDValue RHS = N->getOperand(1);
    SDValue True = N->getOperand(2);
    SDValue False = N->getOperand(3);
    ISD::CondCode NCC = cast<CondCodeSDNode>(N->getOperand(4))->get();

    if (LHS.getOperand(2).getNode() != True.getNode() ||
        LHS.getOperand(3).getNode() != False.getNode() ||
        RHS.getNode() != False.getNode()) {
      return SDValue();
    }
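
    // For example, with cc == setolt in the seteq form above, the nested
    // selectcc x, y, a, b, setolt collapses to selectcc x, y, a, b, setuge:
    // comparing the nested result against b for equality selects the
    // opposite arm.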
    switch (NCC) {
    default: return SDValue();
    case ISD::SETNE: return LHS;
    case ISD::SETEQ: {
      ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
      LHSCC = ISD::getSetCCInverse(LHSCC,
                                   LHS.getOperand(0).getValueType().isInteger());
      if (DCI.isBeforeLegalizeOps() ||
          isCondCodeLegal(LHSCC, LHS.getOperand(0).getSimpleValueType()))
        return DAG.getSelectCC(DL,
                               LHS.getOperand(0),
                               LHS.getOperand(1),
                               LHS.getOperand(2),
                               LHS.getOperand(3),
                               LHSCC);
      break;
    }
    }
    return SDValue();
  }

  case AMDGPUISD::R600_EXPORT: {
    SDValue Arg = N->getOperand(1);
    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
      break;

    SDValue NewArgs[8] = {
      N->getOperand(0), // Chain
      SDValue(),
      N->getOperand(2), // ArrayBase
      N->getOperand(3), // Type
      N->getOperand(4), // SWZ_X
      N->getOperand(5), // SWZ_Y
      N->getOperand(6), // SWZ_Z
      N->getOperand(7)  // SWZ_W
    };
    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG, DL);
    return DAG.getNode(AMDGPUISD::R600_EXPORT, DL, N->getVTList(), NewArgs);
  }
  case AMDGPUISD::TEXTURE_FETCH: {
    SDValue Arg = N->getOperand(1);
    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
      break;

    SDValue NewArgs[19] = {
      N->getOperand(0),
      N->getOperand(1),
      N->getOperand(2),
      N->getOperand(3),
      N->getOperand(4),
      N->getOperand(5),
      N->getOperand(6),
      N->getOperand(7),
      N->getOperand(8),
      N->getOperand(9),
      N->getOperand(10),
      N->getOperand(11),
      N->getOperand(12),
      N->getOperand(13),
      N->getOperand(14),
      N->getOperand(15),
      N->getOperand(16),
      N->getOperand(17),
      N->getOperand(18),
    };
    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
  }
  default: break;
  }

  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
                                     SDValue &Src, SDValue &Neg, SDValue &Abs,
                                     SDValue &Sel, SDValue &Imm,
                                     SelectionDAG &DAG) const {
  const R600InstrInfo *TII = getSubtarget()->getInstrInfo();
  if (!Src.isMachineOpcode())
    return false;

  switch (Src.getMachineOpcode()) {
  case AMDGPU::FNEG_R600:
    if (!Neg.getNode())
      return false;
    Src = Src.getOperand(0);
    Neg = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
    return true;
  case AMDGPU::FABS_R600:
    if (!Abs.getNode())
      return false;
    Src = Src.getOperand(0);
    Abs = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
    return true;
  case AMDGPU::CONST_COPY: {
    unsigned Opcode = ParentNode->getMachineOpcode();
    bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;

    if (!Sel.getNode())
      return false;

    SDValue CstOffset = Src.getOperand(0);
    if (ParentNode->getValueType(0).isVector())
      return false;
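
    // Folding in one more constant is only legal while the combined set of
    // constant reads still fits the hardware limits; this is what the
    // fitsConstReadLimitations check below verifies.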
    // Gather the constant values already used by the parent instruction.
    int SrcIndices[] = {
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
    };
    std::vector<unsigned> Consts;
    for (int OtherSrcIdx : SrcIndices) {
      int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
      if (OtherSrcIdx < 0 || OtherSelIdx < 0)
        continue;
      if (HasDst) {
        OtherSrcIdx--;
        OtherSelIdx--;
      }
      if (RegisterSDNode *Reg =
              dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
        if (Reg->getReg() == AMDGPU::ALU_CONST) {
          ConstantSDNode *Cst
              = cast<ConstantSDNode>(ParentNode->getOperand(OtherSelIdx));
          Consts.push_back(Cst->getZExtValue());
        }
      }
    }

    ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
    Consts.push_back(Cst->getZExtValue());
    if (!TII->fitsConstReadLimitations(Consts)) {
      return false;
    }

    Sel = CstOffset;
    Src = DAG.getRegister(AMDGPU::ALU_CONST, MVT::f32);
    return true;
  }
  case AMDGPU::MOV_IMM_GLOBAL_ADDR:
    // Check if the Imm slot is already in use (as in the MOV_IMM cases
    // below).
    if (cast<ConstantSDNode>(Imm)->getZExtValue())
      return false;
    Imm = Src.getOperand(0);
    Src = DAG.getRegister(AMDGPU::ALU_LITERAL_X, MVT::i32);
    return true;
  case AMDGPU::MOV_IMM_I32:
  case AMDGPU::MOV_IMM_F32: {
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
    uint64_t ImmValue = 0;

    if (Src.getMachineOpcode() == AMDGPU::MOV_IMM_F32) {
      ConstantFPSDNode *FPC = cast<ConstantFPSDNode>(Src.getOperand(0));
      float FloatValue = FPC->getValueAPF().convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
      }
    } else {
      ConstantSDNode *C = cast<ConstantSDNode>(Src.getOperand(0));
      uint64_t Value = C->getZExtValue();
      if (Value == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (Value == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = Value;
      }
    }
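
    // For example: 0.0, 0.5 and 1.0 (and integer 0/1) fold to the dedicated
    // ZERO, HALF, ONE and ONE_INT registers, while a value such as 2.5f keeps
    // ALU_LITERAL_X and carries its bit pattern in the literal slot below.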

    // Check that we aren't already using an immediate.
    // XXX: It's possible for an instruction to have more than one
    // immediate operand, but this is not supported yet.
    if (ImmReg == AMDGPU::ALU_LITERAL_X) {
      if (!Imm.getNode())
        return false;
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Imm);
      assert(C);
      if (C->getZExtValue())
        return false;
      Imm = DAG.getTargetConstant(ImmValue, SDLoc(ParentNode), MVT::i32);
    }
    Src = DAG.getRegister(ImmReg, MVT::i32);
    return true;
  }
  default:
    return false;
  }
}

/// \brief Fold the instructions after selecting them
SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
                                            SelectionDAG &DAG) const {
  const R600InstrInfo *TII = getSubtarget()->getInstrInfo();
  if (!Node->isMachineOpcode())
    return Node;

  unsigned Opcode = Node->getMachineOpcode();
  SDValue FakeOp;

  std::vector<SDValue> Ops(Node->op_begin(), Node->op_end());
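
  // Illustrative example: if src0 of a MUL_IEEE (hypothetical instance) is an
  // FNEG_R600 node, FoldOperand above strips the FNEG and sets the matching
  // src0_neg modifier instead, making the negation free.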

  if (Opcode == AMDGPU::DOT_4) {
    int OperandIdx[] = {
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
    };
    int NegIdx[] = {
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
    };
    int AbsIdx[] = {
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
    };
    for (unsigned i = 0; i < 8; i++) {
      if (OperandIdx[i] < 0)
        return Node;
      SDValue &Src = Ops[OperandIdx[i] - 1];
      SDValue &Neg = Ops[NegIdx[i] - 1];
      SDValue &Abs = Ops[AbsIdx[i] - 1];
      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
      int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
      if (HasDst)
        SelIdx--;
      SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
      if (FoldOperand(Node, i, Src, Neg, Abs, Sel, FakeOp, DAG))
        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
    }
  } else if (Opcode == AMDGPU::REG_SEQUENCE) {
    for (unsigned i = 1, e = Node->getNumOperands(); i < e; i += 2) {
      SDValue &Src = Ops[i];
      if (FoldOperand(Node, i, Src, FakeOp, FakeOp, FakeOp, FakeOp, DAG))
        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
    }
  } else if (Opcode == AMDGPU::CLAMP_R600) {
    SDValue Src = Node->getOperand(0);
    if (!Src.isMachineOpcode() ||
        !TII->hasInstrModifiers(Src.getMachineOpcode()))
      return Node;
    int ClampIdx = TII->getOperandIdx(Src.getMachineOpcode(),
                                      AMDGPU::OpName::clamp);
    if (ClampIdx < 0)
      return Node;
    SDLoc DL(Node);
    // Rebuild the source instruction with its clamp modifier set.
    std::vector<SDValue> Ops(Src->op_begin(), Src->op_end());
    Ops[ClampIdx - 1] = DAG.getTargetConstant(1, DL, MVT::i32);
    return DAG.getMachineNode(Src.getMachineOpcode(), DL,
                              Node->getVTList(), Ops);
  } else {
    if (!TII->hasInstrModifiers(Opcode))
      return Node;
    int OperandIdx[] = {
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
    };
    int NegIdx[] = {
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
    };
    int AbsIdx[] = {
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
      -1 // src2 has no abs modifier.
    };
    for (unsigned i = 0; i < 3; i++) {
      if (OperandIdx[i] < 0)
        return Node;
      SDValue &Src = Ops[OperandIdx[i] - 1];
      SDValue &Neg = Ops[NegIdx[i] - 1];
      SDValue FakeAbs;
      SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
      int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
      int ImmIdx = TII->getOperandIdx(Opcode, AMDGPU::OpName::literal);
      if (HasDst) {
        SelIdx--;
        ImmIdx--;
      }
      SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
      SDValue &Imm = Ops[ImmIdx];
      if (FoldOperand(Node, i, Src, Neg, Abs, Sel, Imm, DAG))
        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
    }
  }

  return Node;
}