//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden by the target to handle them.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned InstCount = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      atInsnStart = true;
    } else if (strncmp(Str, MAI.getCommentString(),
                       strlen(MAI.getCommentString())) == 0) {
      // Stop counting as an instruction after a comment until the next
      // separator.
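      // (For example, with SeparatorString ";" and CommentString "#", the
      // string "nop ; nop # tail comment\nadd" counts three instructions:
      // the comment text is skipped, and the newline restarts counting.)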
      atInsnStart = false;
    }

    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      ++InstCount;
      atInsnStart = false;
    }
  }

  return InstCount * MAI.getMaxInstLength();
}

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // We don't know how to commute this instruction. Targets should
    // implement their own commuteInstructionImpl.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): operands are not "
         "commutable.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
  unsigned Reg1 = MI.getOperand(Idx1).getReg();
  unsigned Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
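    // (Note that the clone is not inserted into any basic block; when NewMI
    // is requested, the caller is responsible for inserting or deleting the
    // returned instruction.)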
    MachineFunction &MF = *MI.getParent()->getParent();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done by the findCommutedOpIndices()
  // call below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
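  // (Worked example: a 32-bit sub-register at bit offset 32 of a 64-bit
  // register class yields Size = 4 and Offset = 4 below; on a big-endian
  // target the final flip remaps that to Offset = 0.)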
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr &Orig,
                                         MachineFunction &MF) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(&Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable.
    StackMapOpers opers(&MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap, e.g. via anyregcc).
    PatchPointOpers opers(&MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
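  // (Operands below StartIdx are fixed: the ID and shadow-byte count for
  // STACKMAP, plus the call target and call arguments for PATCHPOINT.)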
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, and the function
  // arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI.getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    // Add a memory operand; foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
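  // (For example, folding the def operand of "%dst = COPY %src" turns the
  // COPY into a store of %src to the stack slot, while folding the use
  // operand turns it into a load of %dst from the slot.)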
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
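//
// For example, given an associative and commutative integer ADD:
//
//   %a = ADD %x0, %x1
//   %b = ADD %a, %x2     (Prev)
//   %c = ADD %b, %x3     (Root)
// -->
//   %a = ADD %x0, %x1
//   %t = ADD %x2, %x3    <-- independent of %a, can issue in parallel
//   %c = ADD %a, %t
//
// turning a chain of three dependent adds into a tree of depth two.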

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the operand index of A, B, X, and Y.
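  // (Concretely: A and X are read from Prev, B and Y from Root, so e.g.
  // REASSOC_AX_BY maps A->Prev(1), B->Root(1), X->Prev(2), Y->Root(2),
  // and REASSOC_XA_YB maps A->Prev(2), B->Root(2), X->Prev(1), Y->Root(1).)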
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from/to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI) return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  unsigned DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
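  // (Typical instructions that do survive the checks below are constant
  // materializations, e.g. immediate moves or constant-pool address
  // computations.)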
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI.getOpcode() != FrameSetupOpcode &&
      MI.getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI.getOperand(0).getImm();
  SPAdj = TFI->alignSPAdjust(SPAdj);

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
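  // (On most targets the stack-oriented pointer checked below is simply the
  // stack pointer, e.g. RSP on x86-64.)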
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
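/// Transient instructions (e.g. COPY before register allocation) report a
/// latency of zero; loads fall back to the scheduling model's LoadLatency,
/// and opcodes flagged by isHighLatencyDef() use HighLatency.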
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required.
  return -1;
}

unsigned TargetInstrInfo::computeOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() &&
         "computeDefOperandLatency failed");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI.getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}