//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
#include <cstdlib> // for strtol
#include <cstring> // for strncmp/strlen

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
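///
/// For illustration (a sketch, not from the source), assume a hypothetical
/// target whose MaxInstLength is 4, whose separator string is ";", and whose
/// comment string is "#". Then the inline asm text
///
///   "nop; nop\n.space 100\n# done\n"
///
/// measures 4 + 4 + 100 = 108 bytes: two instructions, a literal 100-byte
/// .space block, and a comment that contributes nothing.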
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MAI.getMaxInstLength();
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
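// An illustrative commutation (a sketch, not from the source): for a
// hypothetical three-address instruction
//
//   %dst = FMADD %a, %b, %c
//
// calling commuteInstructionImpl with Idx1 = 1 and Idx2 = 2 swaps %a and %b
// together with their sub-register indices and kill/undef/internal-read
// flags; if %dst is tied to one of the commuted sources, the def operand is
// rewritten below so that it stays tied to the same value.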
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
  unsigned Reg1 = MI.getOperand(Idx1).getReg();
  unsigned Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // If destination is tied to either of the commuted source registers, then
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
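// Illustrative use of fixCommutedOpIndices (a sketch, not from the source):
// a target's findCommutedOpIndices override that knows operands 2 and 3 are
// its commutable pair can resolve the caller-supplied indices with:
//
//   // SrcOpIdx1/SrcOpIdx2 may be fixed indices or CommuteAnyOperandIndex;
//   // on success they name operands 2 and 3 in some order.
//   if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3))
//     return false;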
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}
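// A worked example for getStackSlotRange below (a sketch, not from the
// source): given a 16-byte register class and a sub-register index covering
// bits [64, 128), the result is Size = 8 and Offset = 8 on a little-endian
// target; on a big-endian target the offset is mirrored to
// SpillSize - (Offset + Size) = 0.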
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize /= 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}
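// When foldPatchpoint (below) folds a live value into a stack slot, the
// folded register operand is rewritten as an indirect memory reference of
// the form
//
//   <StackMaps::IndirectMemRefOp>, <size in bytes>, <frame index>, <offset>
//
// so stackmap consumers know to reload the value from the slot.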
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, and function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.add(MO);
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
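// For the COPY special case above, an illustrative fold (a sketch, not from
// the source): folding the def operand (Ops = {0}) of
//
//   %dst = COPY %src
//
// produces a store of %src into the stack slot, while folding the use
// (Ops = {1}) produces a reload into %dst.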
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}
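// An illustrative sibling match (a sketch, not from the source): with
//
//   %b = ADD %p, %q    <- Prev (the sibling)
//   %c = ADD %b, %y    <- Inst
//
// operand 1 of Inst is defined by another ADD, so MI1 below is the sibling
// and Commuted stays false; if the matching ADD fed operand 2 instead, the
// pair would be swapped and Commuted set to true.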
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}
/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
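// An illustrative walk-through of reassociateOps above (a sketch, not from
// the source) for REASSOC_AX_BY (Row 0; A, B, X, Y at indices 1, 1, 2, 2):
//
//   Prev: %b = ADD %a, %x    -> OpA = %a, OpX = %x
//   Root: %c = ADD %b, %y    -> OpB = %b, OpY = %y
//
// which is rewritten as:
//
//   %new = ADD %x, %y
//   %c   = ADD %a, %new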
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  unsigned DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
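// A worked example for getSPAdjust below (a sketch, not from the source):
// on a target whose stack grows down, a call frame setup pseudo such as
//
//   ADJCALLSTACKDOWN 16, ...
//
// yields +16 (the frame grows by 16 bytes), and the matching frame-destroy
// pseudo yields -16.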
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "misched");
}
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}
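// An illustrative reading of hasLowDefLatency below (a sketch, not from the
// source): if the itinerary reports that the value defined at operand DefIdx
// becomes available at cycle 0 or 1, the def counts as "low latency", so a
// scheduler need not work hard to hide it.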
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
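// An illustrative decomposition for getInsertSubregInputs below (a sketch,
// not from the source):
//
//   %d = INSERT_SUBREG %base, %ins, sub0
//
// yields BaseReg = { %base, its sub-register index } and
// InsertedReg = { %ins, its sub-register index, sub0 }.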
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}