//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {}

const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive, which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e.
/// not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ?
                      MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
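  // For example, a caller may pass CommuteAnyOperandIndex for both indices
  // and let findCommutedOpIndices() pick a legal pair of operands to swap.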
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
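  // A branch that is not a barrier is a conditional branch; treat it as
  // unpredicated even if the target models its condition as a predicate
  // operand.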
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->isLoad() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>(MMO->getPseudoValue()))
      Accesses.push_back(MMO);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->isStore() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>(MMO->getPseudoValue()))
      Accesses.push_back(MMO);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
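  // A subregister whose size is not a multiple of 8 bits cannot be addressed
  // as a byte range within the stack slot, so give up.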
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &
TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertBefore,
                           const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (Register::isPhysicalRegister(LiveReg))
    return RC->contains(LiveReg) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable.
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
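    // Note that unlike STACKMAP and PATCHPOINT, a STATEPOINT may also define
    // registers, and one such def may itself be folded (see DefToFoldIdx
    // below).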
    StartIdx = StatepointOpers(&MI).getVarIdx();
    NumDefs = MI.getNumDefs();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    // When called from regalloc (InlineSpiller), operands must be untied,
    // and regalloc will take care of (re)loading the operand from memory.
    // But when called from other places (e.g. the peephole pass),
    // we cannot fold operands that are tied - callers are unaware they
    // need to reload the destination register.
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, and function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
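  // For example, folding a load of a 32-bit subregister of a value spilled
  // to an 8-byte slot should produce a 4-byte memory operand rather than one
  // the size of the whole slot.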
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // A straight COPY may fold as a load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
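    // A null result here means the target could not fold the load, and this
    // function will in turn return nullptr to its caller.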
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineMemOperand *MMO : LoadMI.memoperands())
      NewMI->addMemOperand(MF, MMO);
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative (this can
  //    be different even for instructions with the same opcode if traits like
  //    fast-math-flags are included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode && isAssociativeAndCommutative(*MI1) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
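  // E.g. for REASSOC_AX_BY (row 0), A is operand 1 of Prev, B is operand 1
  // of Root, X is operand 2 of Prev, and Y is operand 2 of Root.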
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
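  // In the *_BY patterns, Prev defines operand 1 of Root; in the *_YB
  // patterns, it defines operand 2.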
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AAResults *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
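// It succeeds only when the memory reference decomposes to a single base
// operand; anything more complex must be queried through
// getMemOperandsWithOffsetWidth() directly.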
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ?
                        2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Cases where super- or sub-registers need to be described should
    // be handled by the target's hook implementation.
    assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
           "TargetInstrInfo::describeLoadedValue can't describe super- or "
           "sub-regs for copy instructions");
    return None;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return None;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return None;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return None;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //   ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax,
    //          implicit-def $rdx
    //   ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return None;

    // TODO: In what way do we need to take Reg into consideration here?
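    // Build an expression that adds Offset to BaseOp and then dereferences
    // MMO->getSize() bytes, i.e. the value lives in memory at BaseOp + Offset.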

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return None;
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
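  // Operand 1 is the source register (possibly with a subregister) and
  // operand 2 is the subregister index being extracted.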
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {}